diff --git a/gcloud-java-compute/pom.xml b/gcloud-java-compute/pom.xml index 5bf336e01a2f..6fc456442024 100644 --- a/gcloud-java-compute/pom.xml +++ b/gcloud-java-compute/pom.xml @@ -24,7 +24,7 @@ com.google.apis google-api-services-compute - v1-rev97-1.21.0 + v1-rev103-1.21.0 compile diff --git a/gcloud-java-compute/src/main/java/com/google/gcloud/compute/Compute.java b/gcloud-java-compute/src/main/java/com/google/gcloud/compute/Compute.java index 300d38908a8b..aaedbccf13bb 100644 --- a/gcloud-java-compute/src/main/java/com/google/gcloud/compute/Compute.java +++ b/gcloud-java-compute/src/main/java/com/google/gcloud/compute/Compute.java @@ -161,7 +161,6 @@ enum ZoneField { CREATION_TIMESTAMP("creationTimestamp"), DESCRIPTION("description"), ID("id"), - MAINTENANCE_WINDOWS("maintenanceWindows"), NAME("name"), REGION("region"), SELF_LINK("selfLink"), @@ -917,6 +916,54 @@ public static ImageFilter notEquals(ImageField field, long value) { } } + /** + * Class for filtering disk lists. + */ + class DiskFilter extends ListFilter { + + private static final long serialVersionUID = 5856790665396877913L; + + private DiskFilter(DiskField field, ComparisonOperator operator, Object value) { + super(field.selector(), operator, value); + } + + /** + * Returns an equals filter for the given field and string value. For string fields, + * {@code value} is interpreted as a regular expression using RE2 syntax. {@code value} must + * match the entire field. + * + * @see RE2 + */ + public static DiskFilter equals(DiskField field, String value) { + return new DiskFilter(checkNotNull(field), ComparisonOperator.EQ, checkNotNull(value)); + } + + /** + * Returns a not-equals filter for the given field and string value. For string fields, + * {@code value} is interpreted as a regular expression using RE2 syntax. {@code value} must + * match the entire field. + * + * @see RE2 + */ + public static DiskFilter notEquals(DiskField field, String value) { + return new DiskFilter(checkNotNull(field), ComparisonOperator.NE, checkNotNull(value)); + } + + /** + * Returns an equals filter for the given field and long value. + */ + public static DiskFilter equals(DiskField field, long value) { + return new DiskFilter(checkNotNull(field), ComparisonOperator.EQ, value); + } + + /** + * Returns a not-equals filter for the given field and long value. + */ + public static DiskFilter notEquals(DiskField field, long value) { + return new DiskFilter(checkNotNull(field), ComparisonOperator.NE, value); + } + } + /** * Class for specifying disk type get options. */ @@ -1585,6 +1632,112 @@ public static ImageListOption fields(ImageField... fields) { } } + /** + * Class for specifying disk get options. + */ + class DiskOption extends Option { + + private static final long serialVersionUID = -4354796876226661667L; + + private DiskOption(ComputeRpc.Option option, Object value) { + super(option, value); + } + + /** + * Returns an option to specify the disk's fields to be returned by the RPC call. If this option + * is not provided, all disk's fields are returned. {@code DiskOption.fields} can be used to + * specify only the fields of interest. {@link Disk#diskId()}, + * {@link DiskConfiguration#diskType()} and either + * {@link SnapshotDiskConfiguration#sourceSnapshot()} or + * {@link ImageDiskConfiguration#sourceImage()} are always returned, even if not specified. + */ + public static DiskOption fields(DiskField... 
fields) { + return new DiskOption(ComputeRpc.Option.FIELDS, DiskField.selector(fields)); + } + } + + /** + * Class for specifying disk list options. + */ + class DiskListOption extends Option { + + private static final long serialVersionUID = -5148497888688645905L; + + private DiskListOption(ComputeRpc.Option option, Object value) { + super(option, value); + } + + /** + * Returns an option to specify a filter on the disks being listed. + */ + public static DiskListOption filter(DiskFilter filter) { + return new DiskListOption(ComputeRpc.Option.FILTER, filter.toPb()); + } + + /** + * Returns an option to specify the maximum number of disks returned per page. {@code pageSize} + * must be between 0 and 500 (inclusive). If not specified 500 is used. + */ + public static DiskListOption pageSize(long pageSize) { + return new DiskListOption(ComputeRpc.Option.MAX_RESULTS, pageSize); + } + + /** + * Returns an option to specify the page token from which to start listing disks. + */ + public static DiskListOption pageToken(String pageToken) { + return new DiskListOption(ComputeRpc.Option.PAGE_TOKEN, pageToken); + } + + /** + * Returns an option to specify the disk's fields to be returned by the RPC call. If this option + * is not provided, all disk's fields are returned. {@code DiskListOption.fields} can be used to + * specify only the fields of interest. {@link Disk#diskId()}, + * {@link DiskConfiguration#diskType()} and either + * {@link SnapshotDiskConfiguration#sourceSnapshot()} or + * {@link ImageDiskConfiguration#sourceImage()} are always returned, even if not specified. + */ + public static DiskListOption fields(DiskField... fields) { + StringBuilder builder = new StringBuilder(); + builder.append("items(").append(DiskField.selector(fields)).append("),nextPageToken"); + return new DiskListOption(ComputeRpc.Option.FIELDS, builder.toString()); + } + } + + /** + * Class for specifying disk aggregated list options. + */ + class DiskAggregatedListOption extends Option { + + private static final long serialVersionUID = 1163784797870242766L; + + private DiskAggregatedListOption(ComputeRpc.Option option, Object value) { + super(option, value); + } + + /** + * Returns an option to specify a filter on the disks being listed. + */ + public static DiskAggregatedListOption filter(DiskFilter filter) { + return new DiskAggregatedListOption(ComputeRpc.Option.FILTER, filter.toPb()); + } + + /** + * Returns an option to specify the maximum number of disks returned per page. {@code pageSize} + * must be between 0 and 500 (inclusive). If not specified 500 is used. + */ + public static DiskAggregatedListOption pageSize(long pageSize) { + return new DiskAggregatedListOption(ComputeRpc.Option.MAX_RESULTS, pageSize); + } + + /** + * Returns an option to specify the page token from which to start listing disks. + */ + public static DiskAggregatedListOption pageToken(String pageToken) { + return new DiskAggregatedListOption(ComputeRpc.Option.PAGE_TOKEN, pageToken); + } + } + /** * Returns the requested disk type or {@code null} if not found. * @@ -1770,8 +1923,7 @@ public static ImageListOption fields(ImageField... fields) { /** * Creates a new snapshot. * - * @return a zone operation if the create request was issued correctly, {@code null} if - * {@code snapshot.sourceDisk} was not found + * @return a zone operation for snapshot creation * @throws ComputeException upon failure */ Operation create(SnapshotInfo snapshot, OperationOption... 
options); @@ -1868,4 +2020,51 @@ public static ImageListOption fields(ImageField... fields) { */ Operation deprecate(ImageId image, DeprecationStatus deprecationStatus, OperationOption... options); + + /** + * Returns the requested disk or {@code null} if not found. + * + * @throws ComputeException upon failure + */ + Disk get(DiskId diskId, DiskOption... options); + + /** + * Creates a new disk. + * + * @return a zone operation for disk's creation + * @throws ComputeException upon failure + */ + Operation create(DiskInfo disk, OperationOption... options); + + /** + * Lists disks for the provided zone. + * + * @throws ComputeException upon failure + */ + Page listDisks(String zone, DiskListOption... options); + + /** + * Lists disks for all zones. + * + * @throws ComputeException upon failure + */ + Page listDisks(DiskAggregatedListOption... options); + + /** + * Deletes the requested disk. + * + * @return a zone operation if the request was issued correctly, {@code null} if the disk was not + * found + * @throws ComputeException upon failure + */ + Operation delete(DiskId disk, OperationOption... options); + + /** + * Resizes the disk to the requested size. The new size must be larger than the previous one. + * + * @return a zone operation if the request was issued correctly, {@code null} if the disk was not + * found + * @throws ComputeException upon failure or if the new disk size is smaller than the previous one + */ + Operation resize(DiskId disk, long sizeGb, OperationOption... options); } diff --git a/gcloud-java-compute/src/main/java/com/google/gcloud/compute/ComputeImpl.java b/gcloud-java-compute/src/main/java/com/google/gcloud/compute/ComputeImpl.java index 1f875ad337f1..84e42d33fe10 100644 --- a/gcloud-java-compute/src/main/java/com/google/gcloud/compute/ComputeImpl.java +++ b/gcloud-java-compute/src/main/java/com/google/gcloud/compute/ComputeImpl.java @@ -313,6 +313,46 @@ public Page nextPage() { } } + private static class DiskPageFetcher implements NextPageFetcher { + + private static final long serialVersionUID = 4146589787872718476L; + private final Map requestOptions; + private final ComputeOptions serviceOptions; + private final String zone; + + DiskPageFetcher(String zone, ComputeOptions serviceOptions, String cursor, + Map optionMap) { + this.requestOptions = + PageImpl.nextRequestOptions(ComputeRpc.Option.PAGE_TOKEN, cursor, optionMap); + this.serviceOptions = serviceOptions; + this.zone = zone; + } + + @Override + public Page nextPage() { + return listDisks(zone, serviceOptions, requestOptions); + } + } + + private static class AggregatedDiskPageFetcher implements NextPageFetcher { + + private static final long serialVersionUID = -5240045334115926206L; + private final Map requestOptions; + private final ComputeOptions serviceOptions; + + AggregatedDiskPageFetcher(ComputeOptions serviceOptions, String cursor, + Map optionMap) { + this.requestOptions = + PageImpl.nextRequestOptions(ComputeRpc.Option.PAGE_TOKEN, cursor, optionMap); + this.serviceOptions = serviceOptions; + } + + @Override + public Page nextPage() { + return listDisks(serviceOptions, requestOptions); + } + } + private final ComputeRpc computeRpc; ComputeImpl(ComputeOptions options) { @@ -1161,6 +1201,141 @@ public com.google.api.services.compute.model.Operation call() { } } + @Override + public Disk get(final DiskId diskId, DiskOption... 
options) { + final Map optionsMap = optionMap(options); + try { + com.google.api.services.compute.model.Disk answer = + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.Disk call() { + return computeRpc.getDisk(diskId.zone(), diskId.disk(), optionsMap); + } + }, options().retryParams(), EXCEPTION_HANDLER); + return answer == null ? null : Disk.fromPb(this, answer); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Operation create(final DiskInfo disk, OperationOption... options) { + final com.google.api.services.compute.model.Disk diskPb = + disk.setProjectId(options().projectId()).toPb(); + final Map optionsMap = optionMap(options); + try { + return Operation.fromPb(this, + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.Operation call() { + return computeRpc.createDisk(disk.diskId().zone(), diskPb, optionsMap); + } + }, options().retryParams(), EXCEPTION_HANDLER)); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + private static Function diskFromPb( + final ComputeOptions serviceOptions) { + return new Function() { + @Override + public Disk apply(com.google.api.services.compute.model.Disk disk) { + return Disk.fromPb(serviceOptions.service(), disk); + } + }; + } + + @Override + public Page listDisks(String zone, DiskListOption... options) { + return listDisks(zone, options(), optionMap(options)); + } + + private static Page listDisks(final String zone, final ComputeOptions serviceOptions, + final Map optionsMap) { + try { + ComputeRpc.Tuple> result = + runWithRetries(new Callable>>() { + @Override + public ComputeRpc.Tuple> call() { + return serviceOptions.rpc().listDisks(zone, optionsMap); + } + }, serviceOptions.retryParams(), EXCEPTION_HANDLER); + String cursor = result.x(); + Iterable disks = Iterables.transform( + result.y() == null ? ImmutableList.of() + : result.y(), diskFromPb(serviceOptions)); + return new PageImpl<>(new DiskPageFetcher(zone, serviceOptions, cursor, optionsMap), + cursor, disks); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Page listDisks(DiskAggregatedListOption... options) { + return listDisks(options(), optionMap(options)); + } + + private static Page listDisks(final ComputeOptions serviceOptions, + final Map optionsMap) { + try { + ComputeRpc.Tuple> result = + runWithRetries(new Callable>>() { + @Override + public ComputeRpc.Tuple> call() { + return serviceOptions.rpc().listDisks(optionsMap); + } + }, serviceOptions.retryParams(), EXCEPTION_HANDLER); + String cursor = result.x(); + Iterable disks = Iterables.transform( + result.y() == null ? ImmutableList.of() + : result.y(), diskFromPb(serviceOptions)); + return new PageImpl<>(new AggregatedDiskPageFetcher(serviceOptions, cursor, optionsMap), + cursor, disks); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Operation delete(final DiskId disk, OperationOption... 
options) { + final Map optionsMap = optionMap(options); + try { + com.google.api.services.compute.model.Operation answer = + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.Operation call() { + return computeRpc.deleteDisk(disk.zone(), disk.disk(), optionsMap); + } + }, options().retryParams(), EXCEPTION_HANDLER); + return answer == null ? null : Operation.fromPb(this, answer); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Operation resize(final DiskId disk, final long sizeGb, OperationOption... options) { + final Map optionsMap = optionMap(options); + try { + com.google.api.services.compute.model.Operation answer = + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.Operation call() { + return computeRpc.resizeDisk(disk.zone(), disk.disk(), sizeGb, optionsMap); + } + }, options().retryParams(), EXCEPTION_HANDLER); + return answer == null ? null : Operation.fromPb(this, answer); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + private Map optionMap(Option... options) { Map optionMap = Maps.newEnumMap(ComputeRpc.Option.class); for (Option option : options) { diff --git a/gcloud-java-compute/src/main/java/com/google/gcloud/compute/Disk.java b/gcloud-java-compute/src/main/java/com/google/gcloud/compute/Disk.java new file mode 100644 index 000000000000..265996a56af6 --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/gcloud/compute/Disk.java @@ -0,0 +1,256 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.gcloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.gcloud.compute.Compute.DiskOption; +import com.google.gcloud.compute.Compute.OperationOption; + +import java.io.IOException; +import java.io.ObjectInputStream; +import java.util.List; +import java.util.Objects; + +/** + * A Google Compute Engine persistent disk. A disk can be used as primary storage for your virtual + * machine instances. Objects of this class are immutable. To get a {@code Disk} object with the + * most recent information use {@link #reload}. {@code Disk} adds a layer of service-related + * functionality over {@link DiskInfo}. + * + * @see Block Storage + */ +public class Disk extends DiskInfo { + + private static final long serialVersionUID = 7234747955588262204L; + + private final ComputeOptions options; + private transient Compute compute; + + /** + * A builder for {@code Disk} objects. 
+ */ + public static class Builder extends DiskInfo.Builder { + + private final Compute compute; + private final DiskInfo.BuilderImpl infoBuilder; + + Builder(Compute compute, DiskId diskId, DiskConfiguration diskConfiguration) { + this.compute = compute; + this.infoBuilder = new DiskInfo.BuilderImpl(diskId, diskConfiguration); + } + + Builder(Disk disk) { + this.compute = disk.compute; + this.infoBuilder = new DiskInfo.BuilderImpl(disk); + } + + @Override + Builder id(String id) { + infoBuilder.id(id); + return this; + } + + @Override + public Builder configuration(DiskConfiguration configuration) { + infoBuilder.configuration(configuration); + return this; + } + + @Override + public Builder diskId(DiskId diskId) { + infoBuilder.diskId(diskId); + return this; + } + + @Override + Builder creationTimestamp(Long creationTimestamp) { + infoBuilder.creationTimestamp(creationTimestamp); + return this; + } + + @Override + Builder creationStatus(CreationStatus creationStatus) { + infoBuilder.creationStatus(creationStatus); + return this; + } + + @Override + public Builder description(String description) { + infoBuilder.description(description); + return this; + } + + @Override + Builder licenses(List licenses) { + infoBuilder.licenses(licenses); + return this; + } + + @Override + Builder attachedInstances(List attachedInstances) { + infoBuilder.attachedInstances(attachedInstances); + return this; + } + + @Override + Builder lastAttachTimestamp(Long lastAttachTimestamp) { + infoBuilder.lastAttachTimestamp(lastAttachTimestamp); + return this; + } + + @Override + Builder lastDetachTimestamp(Long lastDetachTimestamp) { + infoBuilder.lastDetachTimestamp(lastDetachTimestamp); + return this; + } + + @Override + public Disk build() { + return new Disk(compute, infoBuilder); + } + } + + Disk(Compute compute, DiskInfo.BuilderImpl infoBuilder) { + super(infoBuilder); + this.compute = checkNotNull(compute); + this.options = compute.options(); + } + + /** + * Checks if this disk exists. + * + * @return {@code true} if this disk exists, {@code false} otherwise + * @throws ComputeException upon failure + */ + public boolean exists() { + return reload(DiskOption.fields()) != null; + } + + /** + * Fetches current disk's latest information. Returns {@code null} if the disk does not exist. + * + * @param options disk options + * @return a {@code Disk} object with latest information or {@code null} if not found + * @throws ComputeException upon failure + */ + public Disk reload(DiskOption... options) { + return compute.get(diskId(), options); + } + + /** + * Deletes this disk. + * + * @return a zone operation if the delete request was successfully sent, {@code null} if the disk + * was not found + * @throws ComputeException upon failure + */ + public Operation delete(OperationOption... options) { + return compute.delete(diskId(), options); + } + + /** + * Creates a snapshot for this disk given the snapshot's name. + * + * @return a zone operation for snapshot creation + * @throws ComputeException upon failure + */ + public Operation createSnapshot(String snapshot, OperationOption... options) { + return compute.create(SnapshotInfo.of(SnapshotId.of(snapshot), diskId()), options); + } + + /** + * Creates a snapshot for this disk given the snapshot's name and description. + * + * @return a zone operation for snapshot creation + * @throws ComputeException upon failure + */ + public Operation createSnapshot(String snapshot, String description, OperationOption... 
options) { + SnapshotInfo snapshotInfo = SnapshotInfo.builder(SnapshotId.of(snapshot), diskId()) + .description(description) + .build(); + return compute.create(snapshotInfo, options); + } + + /** + * Creates an image for this disk given the image's name. + * + * @return a global operation if the image creation was successfully requested + * @throws ComputeException upon failure + */ + public Operation createImage(String image, OperationOption... options) { + ImageInfo imageInfo = ImageInfo.of(ImageId.of(image), DiskImageConfiguration.of(diskId())); + return compute.create(imageInfo, options); + } + + /** + * Creates an image for this disk given the image's name and description. + * + * @return a global operation if the image creation was successfully requested + * @throws ComputeException upon failure + */ + public Operation createImage(String image, String description, OperationOption... options) { + ImageInfo imageInfo = ImageInfo.builder(ImageId.of(image), DiskImageConfiguration.of(diskId())) + .description(description) + .build(); + return compute.create(imageInfo, options); + } + + /** + * Resizes this disk to the requested size. The new size must be larger than the previous one. + * + * @return a zone operation if the resize request was issued correctly, {@code null} if this disk + * was not found + * @throws ComputeException upon failure or if the new disk size is smaller than the previous one + */ + public Operation resize(long sizeGb, OperationOption... options) { + return compute.resize(diskId(), sizeGb, options); + } + + /** + * Returns the disk's {@code Compute} object used to issue requests. + */ + public Compute compute() { + return compute; + } + + @Override + public Builder toBuilder() { + return new Builder(this); + } + + @Override + public final boolean equals(Object obj) { + return obj instanceof Disk + && Objects.equals(toPb(), ((Disk) obj).toPb()) + && Objects.equals(options, ((Disk) obj).options); + } + + @Override + public final int hashCode() { + return Objects.hash(super.hashCode(), options); + } + + private void readObject(ObjectInputStream input) throws IOException, ClassNotFoundException { + input.defaultReadObject(); + this.compute = options.service(); + } + + static Disk fromPb(Compute compute, com.google.api.services.compute.model.Disk diskPb) { + return new Disk(compute, new DiskInfo.BuilderImpl(diskPb)); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/gcloud/compute/Zone.java b/gcloud-java-compute/src/main/java/com/google/gcloud/compute/Zone.java index 7c766bef27c9..80a6c08f4db1 100644 --- a/gcloud-java-compute/src/main/java/com/google/gcloud/compute/Zone.java +++ b/gcloud-java-compute/src/main/java/com/google/gcloud/compute/Zone.java @@ -16,17 +16,14 @@ package com.google.gcloud.compute; -import com.google.api.services.compute.model.Zone.MaintenanceWindows; import com.google.common.base.Function; import com.google.common.base.MoreObjects; -import com.google.common.collect.Lists; import org.joda.time.format.DateTimeFormatter; import org.joda.time.format.ISODateTimeFormat; import java.io.Serializable; import java.math.BigInteger; -import java.util.List; import java.util.Objects; /** @@ -59,7 +56,6 @@ public com.google.api.services.compute.model.Zone apply(Zone region) { private final Long creationTimestamp; private final String description; private final Status status; - private final List maintenanceWindows; private final RegionId region; private final DeprecationStatus deprecationStatus; @@ -71,113 +67,6 @@ public enum Status { DOWN } - 
/** - * A scheduled maintenance windows for this zone. When a zone is in a maintenance window, all - * resources which reside in the zone will be unavailable. - * - * @see Maintenance - * Windows - */ - public static final class MaintenanceWindow implements Serializable { - - static final Function FROM_PB_FUNCTION = - new Function() { - @Override - public MaintenanceWindow apply(MaintenanceWindows pb) { - return MaintenanceWindow.fromPb(pb); - } - }; - static final Function TO_PB_FUNCTION = - new Function() { - @Override - public MaintenanceWindows apply(MaintenanceWindow maintenanceWindow) { - return maintenanceWindow.toPb(); - } - }; - - private static final long serialVersionUID = 2270641266683329963L; - - private final String name; - private final String description; - private final Long beginTime; - private final Long endTime; - - /** - * Returns a zone maintenance window object. - */ - MaintenanceWindow(String name, String description, Long beginTime, Long endTime) { - this.name = name; - this.description = description; - this.beginTime = beginTime; - this.endTime = endTime; - } - - /** - * Returns the name of the maintenance window. - */ - public String name() { - return name; - } - - /** - * Returns a textual description of the maintenance window. - */ - public String description() { - return description; - } - - /** - * Returns the starting time of the maintenance window in milliseconds since epoch. - */ - public Long beginTime() { - return beginTime; - } - - /** - * Returns the ending time of the maintenance window in milliseconds since epoch. - */ - public Long endTime() { - return endTime; - } - - @Override - public String toString() { - return MoreObjects.toStringHelper(this) - .add("disk", name) - .add("description", description) - .add("beginTime", beginTime) - .add("endTime", endTime) - .toString(); - } - - @Override - public int hashCode() { - return Objects.hash(name, description, beginTime, endTime); - } - - @Override - public boolean equals(Object obj) { - return obj instanceof MaintenanceWindow - && Objects.equals(toPb(), ((MaintenanceWindow) obj).toPb()); - } - - MaintenanceWindows toPb() { - return new MaintenanceWindows() - .setName(name) - .setDescription(description) - .setBeginTime(beginTime != null ? TIMESTAMP_FORMATTER.print(beginTime) : null) - .setEndTime(endTime != null ? TIMESTAMP_FORMATTER.print(endTime) : null); - } - - static MaintenanceWindow fromPb(MaintenanceWindows windowPb) { - return new MaintenanceWindow(windowPb.getName(), windowPb.getDescription(), - windowPb.getBeginTime() != null - ? TIMESTAMP_FORMATTER.parseMillis(windowPb.getBeginTime()) : null, - windowPb.getEndTime() != null - ? 
TIMESTAMP_FORMATTER.parseMillis(windowPb.getEndTime()) : null); - } - } - static final class Builder { private ZoneId zoneId; @@ -186,7 +75,6 @@ static final class Builder { private String description; private Status status; - private List maintenanceWindows; private RegionId region; private DeprecationStatus deprecationStatus; @@ -217,11 +105,6 @@ Builder status(Status status) { return this; } - Builder maintenanceWindows(List maintenanceWindows) { - this.maintenanceWindows = maintenanceWindows; - return this; - } - Builder region(RegionId region) { this.region = region; return this; @@ -243,7 +126,6 @@ private Zone(Builder builder) { this.creationTimestamp = builder.creationTimestamp; this.description = builder.description; this.status = builder.status; - this.maintenanceWindows = builder.maintenanceWindows; this.region = builder.region; this.deprecationStatus = builder.deprecationStatus; } @@ -283,17 +165,6 @@ public Status status() { return status; } - /** - * Returns the scheduled maintenance windows for this zone, if any. When the zone is in a - * maintenance window, all resources which reside in the zone will be unavailable. - * - * @see Maintenance - * Windows - */ - public List maintenanceWindows() { - return maintenanceWindows; - } - /** * Returns the identity of the region that hosts the zone. */ @@ -318,7 +189,6 @@ public String toString() { .add("creationTimestamp", creationTimestamp) .add("description", description) .add("status", status) - .add("maintenanceWindows", maintenanceWindows) .add("region", region) .add("deprecationStatus", deprecationStatus) .toString(); @@ -349,10 +219,6 @@ com.google.api.services.compute.model.Zone toPb() { if (status != null) { zonePb.setStatus(status.name()); } - if (maintenanceWindows != null) { - zonePb.setMaintenanceWindows( - Lists.transform(maintenanceWindows, MaintenanceWindow.TO_PB_FUNCTION)); - } if (region != null) { zonePb.setRegion(region.selfLink()); } @@ -379,10 +245,6 @@ static Zone fromPb(com.google.api.services.compute.model.Zone zonePb) { if (zonePb.getStatus() != null) { builder.status(Status.valueOf(zonePb.getStatus())); } - if (zonePb.getMaintenanceWindows() != null) { - builder.maintenanceWindows( - Lists.transform(zonePb.getMaintenanceWindows(), MaintenanceWindow.FROM_PB_FUNCTION)); - } if (zonePb.getRegion() != null) { builder.region(RegionId.fromUrl(zonePb.getRegion())); } diff --git a/gcloud-java-compute/src/main/java/com/google/gcloud/spi/ComputeRpc.java b/gcloud-java-compute/src/main/java/com/google/gcloud/spi/ComputeRpc.java index 523686283db0..1066124e0638 100644 --- a/gcloud-java-compute/src/main/java/com/google/gcloud/spi/ComputeRpc.java +++ b/gcloud-java-compute/src/main/java/com/google/gcloud/spi/ComputeRpc.java @@ -18,6 +18,7 @@ import com.google.api.services.compute.model.Address; import com.google.api.services.compute.model.DeprecationStatus; +import com.google.api.services.compute.model.Disk; import com.google.api.services.compute.model.DiskType; import com.google.api.services.compute.model.Image; import com.google.api.services.compute.model.License; @@ -305,8 +306,7 @@ public Y y() { /** * Creates a snapshot for the specified disk. 
* - * @return a zone operation if the create request was issued correctly, {@code null} if the disk - * was not found + * @return a zone operation for snapshot creation * @throws ComputeException upon failure */ Operation createSnapshot(String zone, String disk, String snapshot, String description, @@ -377,4 +377,51 @@ Operation createSnapshot(String zone, String disk, String snapshot, String descr */ Operation deprecateImage(String project, String image, DeprecationStatus deprecationStatus, Map options); + + /** + * Returns the requested disk or {@code null} if not found. + * + * @throws ComputeException upon failure + */ + Disk getDisk(String zone, String disk, Map options); + + /** + * Creates a new disk. + * + * @return a zone operation for disk's creation + * @throws ComputeException upon failure + */ + Operation createDisk(String zone, Disk disk, Map options); + + /** + * Lists the disks for the provided zone. + * + * @throws ComputeException upon failure + */ + Tuple> listDisks(String zone, Map options); + + /** + * Lists disks for all zones. + * + * @throws ComputeException upon failure + */ + Tuple> listDisks(Map options); + + /** + * Deletes the requested disk. + * + * @return a zone operation if the request was issued correctly, {@code null} if the disk was not + * found + * @throws ComputeException upon failure + */ + Operation deleteDisk(String zone, String disk, Map options); + + /** + * Resizes the disk to the requested size. The new size must be larger than the previous one. + * + * @return a zone operation if the request was issued correctly, {@code null} if the disk was not + * found + * @throws ComputeException upon failure or if the new disk size is smaller than the previous one + */ + Operation resizeDisk(String zone, String disk, long sizeGb, Map options); } diff --git a/gcloud-java-compute/src/main/java/com/google/gcloud/spi/DefaultComputeRpc.java b/gcloud-java-compute/src/main/java/com/google/gcloud/spi/DefaultComputeRpc.java index 7d0b77a7a73a..4899ab9c8445 100644 --- a/gcloud-java-compute/src/main/java/com/google/gcloud/spi/DefaultComputeRpc.java +++ b/gcloud-java-compute/src/main/java/com/google/gcloud/spi/DefaultComputeRpc.java @@ -31,10 +31,15 @@ import com.google.api.services.compute.model.AddressList; import com.google.api.services.compute.model.AddressesScopedList; import com.google.api.services.compute.model.DeprecationStatus; +import com.google.api.services.compute.model.Disk; +import com.google.api.services.compute.model.DiskAggregatedList; +import com.google.api.services.compute.model.DiskList; import com.google.api.services.compute.model.DiskType; import com.google.api.services.compute.model.DiskTypeAggregatedList; import com.google.api.services.compute.model.DiskTypeList; import com.google.api.services.compute.model.DiskTypesScopedList; +import com.google.api.services.compute.model.DisksResizeRequest; +import com.google.api.services.compute.model.DisksScopedList; import com.google.api.services.compute.model.Image; import com.google.api.services.compute.model.ImageList; import com.google.api.services.compute.model.License; @@ -522,7 +527,7 @@ public Operation createSnapshot(String zone, String disk, String snapshot, Strin .setFields(FIELDS.getString(options)) .execute(); } catch (IOException ex) { - return nullForNotFound(ex); + throw translate(ex); } } @@ -633,6 +638,98 @@ public Operation deprecateImage(String project, String image, DeprecationStatus } } + @Override + public Disk getDisk(String zone, String disk, Map options) { + try { + return 
compute.disks() + .get(this.options.projectId(), zone, disk) + .setFields(FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + + @Override + public Operation createDisk(String zone, Disk disk, Map options) { + try { + return compute.disks() + .insert(this.options.projectId(), zone, disk) + .setFields(FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public Tuple> listDisks(String zone, Map options) { + try { + DiskList diskList = compute.disks() + .list(this.options.projectId(), zone) + .setFilter(FILTER.getString(options)) + .setMaxResults(MAX_RESULTS.getLong(options)) + .setPageToken(PAGE_TOKEN.getString(options)) + .setFields(FIELDS.getString(options)) + .execute(); + Iterable disks = diskList.getItems(); + return Tuple.of(diskList.getNextPageToken(), disks); + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public Tuple> listDisks(Map options) { + try { + DiskAggregatedList aggregatedList = compute.disks() + .aggregatedList(this.options.projectId()) + .setFilter(FILTER.getString(options)) + .setMaxResults(MAX_RESULTS.getLong(options)) + .setPageToken(PAGE_TOKEN.getString(options)) + // todo(mziccard): uncomment or remove once #711 is closed + // .setFields(FIELDS.getString(options)) + .execute(); + ImmutableList.Builder builder = ImmutableList.builder(); + Map scopedList = aggregatedList.getItems(); + if (scopedList != null) { + for (DisksScopedList disksScopedList : scopedList.values()) { + if (disksScopedList.getDisks() != null) { + builder.addAll(disksScopedList.getDisks()); + } + } + } + return Tuple.>of(aggregatedList.getNextPageToken(), + builder.build()); + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public Operation deleteDisk(String zone, String disk, Map options) { + try { + return compute.disks() + .delete(this.options.projectId(), zone, disk) + .setFields(FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + + @Override + public Operation resizeDisk(String zone, String disk, long sizeGb, Map options) { + try { + DisksResizeRequest resizeRequest = new DisksResizeRequest().setSizeGb(sizeGb); + return compute.disks().resize(this.options.projectId(), zone, disk, resizeRequest) + .setFields(FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + /** * This method returns {@code null} if the error code of {@code exception} was 404, re-throws the * exception otherwise. 
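A minimal usage sketch of the disk surface added above (Compute.get, create, listDisks, resize, and delete, together with the DiskFilter/DiskListOption classes). This is not part of the change itself: the project id, zone, disk name, disk type, and sizes are placeholder assumptions, and the ComputeOptions builder call is only the usual gcloud-java bootstrap pattern.

import com.google.gcloud.Page;
import com.google.gcloud.compute.Compute;
import com.google.gcloud.compute.Compute.DiskField;
import com.google.gcloud.compute.Compute.DiskFilter;
import com.google.gcloud.compute.Compute.DiskListOption;
import com.google.gcloud.compute.ComputeOptions;
import com.google.gcloud.compute.Disk;
import com.google.gcloud.compute.DiskId;
import com.google.gcloud.compute.DiskInfo;
import com.google.gcloud.compute.DiskTypeId;
import com.google.gcloud.compute.Operation;
import com.google.gcloud.compute.StandardDiskConfiguration;

public class DiskApiSketch {
  public static void main(String... args) {
    // Placeholder project, zone, and resource names.
    Compute compute = ComputeOptions.builder().projectId("my-project").build().service();
    DiskId diskId = DiskId.of("us-central1-a", "my-disk");
    DiskInfo diskInfo = DiskInfo.of(diskId,
        StandardDiskConfiguration.of(DiskTypeId.of("us-central1-a", "pd-standard")));
    // In real code the creation operation should be polled to completion before the disk is used.
    Operation create = compute.create(diskInfo);      // zone operation for the disk's creation
    Disk disk = compute.get(diskId);                  // null if the disk does not exist
    Page<Disk> page = compute.listDisks("us-central1-a",
        DiskListOption.filter(DiskFilter.notEquals(DiskField.SIZE_GB, 500L)));
    for (Disk d : page.values()) {
      System.out.println(d.diskId());                 // iterate the current page of disks
    }
    Operation resize = compute.resize(diskId, 200L);  // null if the disk was not found
    Operation delete = compute.delete(diskId);        // null if the disk was not found
  }
}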
diff --git a/gcloud-java-compute/src/test/java/com/google/gcloud/compute/ComputeImplTest.java b/gcloud-java-compute/src/test/java/com/google/gcloud/compute/ComputeImplTest.java index ec981a95e0e7..277f8826b421 100644 --- a/gcloud-java-compute/src/test/java/com/google/gcloud/compute/ComputeImplTest.java +++ b/gcloud-java-compute/src/test/java/com/google/gcloud/compute/ComputeImplTest.java @@ -38,6 +38,11 @@ import com.google.gcloud.compute.Compute.AddressFilter; import com.google.gcloud.compute.Compute.AddressListOption; import com.google.gcloud.compute.Compute.AddressOption; +import com.google.gcloud.compute.Compute.DiskAggregatedListOption; +import com.google.gcloud.compute.Compute.DiskField; +import com.google.gcloud.compute.Compute.DiskFilter; +import com.google.gcloud.compute.Compute.DiskListOption; +import com.google.gcloud.compute.Compute.DiskOption; import com.google.gcloud.compute.Compute.DiskTypeAggregatedListOption; import com.google.gcloud.compute.Compute.DiskTypeFilter; import com.google.gcloud.compute.Compute.DiskTypeListOption; @@ -66,7 +71,6 @@ import com.google.gcloud.compute.Operation.OperationError; import com.google.gcloud.compute.Operation.OperationWarning; import com.google.gcloud.compute.Operation.Status; -import com.google.gcloud.compute.Zone.MaintenanceWindow; import com.google.gcloud.spi.ComputeRpc; import com.google.gcloud.spi.ComputeRpc.Tuple; import com.google.gcloud.spi.ComputeRpcFactory; @@ -137,18 +141,12 @@ public class ComputeImplTest { .build(); private static final ZoneId ZONE_ID = ZoneId.of("project", "zone"); private static final Zone.Status ZONE_STATUS = Zone.Status.DOWN; - private static final MaintenanceWindow WINDOW1 = new MaintenanceWindow("NAME1", "DESCRIPTION1", - 1453293420000L, 1453293480000L); - private static final MaintenanceWindow WINDOW2 = new MaintenanceWindow("NAME2", "DESCRIPTION2", - 1453293420000L, 1453293480000L); - private static final List WINDOWS = ImmutableList.of(WINDOW1, WINDOW2); private static final Zone ZONE = Zone.builder() .zoneId(ZONE_ID) .id(ID) .creationTimestamp(CREATION_TIMESTAMP) .description(DESCRIPTION) .status(ZONE_STATUS) - .maintenanceWindows(WINDOWS) .region(REGION_ID) .build(); private static final LicenseId LICENSE_ID = LicenseId.of("project", "license"); @@ -198,6 +196,8 @@ public class ComputeImplTest { private static final ImageInfo IMAGE = ImageInfo.of(IMAGE_ID, DiskImageConfiguration.of(DISK_ID)); private static final DeprecationStatus DEPRECATION_STATUS = DeprecationStatus.builder(DeprecationStatus.Status.DEPRECATED, IMAGE_ID).build(); + private static final DiskInfo DISK = + DiskInfo.of(DISK_ID, StandardDiskConfiguration.of(DISK_TYPE_ID)); // Empty ComputeRpc options private static final Map EMPTY_RPC_OPTIONS = ImmutableMap.of(); @@ -369,6 +369,28 @@ public class ComputeImplTest { MAX_RESULTS, 42L, FILTER, "diskSizeGb ne 500"); + // Disk options + private static final DiskOption DISK_OPTION_FIELDS = + DiskOption.fields(DiskField.ID, DiskField.DESCRIPTION); + + // Disk list options + private static final DiskFilter DISK_FILTER = DiskFilter.notEquals(DiskField.SIZE_GB, 500L); + private static final DiskListOption DISK_LIST_PAGE_TOKEN = DiskListOption.pageToken("cursor"); + private static final DiskListOption DISK_LIST_PAGE_SIZE = DiskListOption.pageSize(42L); + private static final DiskListOption DISK_LIST_FILTER = DiskListOption.filter(DISK_FILTER); + private static final Map DISK_LIST_OPTIONS = ImmutableMap.of( + PAGE_TOKEN, "cursor", + MAX_RESULTS, 42L, + FILTER, "sizeGb ne 500"); + + // Disk 
aggregated list options + private static final DiskAggregatedListOption DISK_AGGREGATED_LIST_PAGE_TOKEN = + DiskAggregatedListOption.pageToken("cursor"); + private static final DiskAggregatedListOption DISK_AGGREGATED_LIST_PAGE_SIZE = + DiskAggregatedListOption.pageSize(42L); + private static final DiskAggregatedListOption DISK_AGGREGATED_LIST_FILTER = + DiskAggregatedListOption.filter(DISK_FILTER); + private static final Function OPERATION_TO_PB_FUNCTION = new Function() { @@ -2376,6 +2398,291 @@ public void testListImagesForProjectWithOptions() { assertArrayEquals(imageList.toArray(), Iterables.toArray(page.values(), Image.class)); } + @Test + public void testGetDisk() { + EasyMock.expect(computeRpcMock.getDisk(DISK_ID.zone(), DISK_ID.disk(), EMPTY_RPC_OPTIONS)) + .andReturn(DISK.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Disk disk = compute.get(DISK_ID); + assertEquals(new Disk(compute, new DiskInfo.BuilderImpl(DISK)), disk); + } + + @Test + public void testGetDisk_Null() { + EasyMock.expect(computeRpcMock.getDisk(DISK_ID.zone(), DISK_ID.disk(), EMPTY_RPC_OPTIONS)) + .andReturn(null); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertNull(compute.get(DISK_ID)); + } + + @Test + public void testGetDiskWithSelectedFields() { + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect(computeRpcMock.getDisk(eq(DISK_ID.zone()), eq(DISK_ID.disk()), + capture(capturedOptions))).andReturn(DISK.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Disk disk = compute.get(DISK_ID, DISK_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(DISK_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("type")); + assertTrue(selector.contains("sourceImage")); + assertTrue(selector.contains("sourceSnapshot")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(55, selector.length()); + assertEquals(new Disk(compute, new DiskInfo.BuilderImpl(DISK)), disk); + } + + @Test + public void testDeleteDisk_Operation() { + EasyMock.expect(computeRpcMock.deleteDisk(DISK_ID.zone(), DISK_ID.disk(), EMPTY_RPC_OPTIONS)) + .andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertEquals(zoneOperation, compute.delete(DISK_ID)); + } + + @Test + public void testDeleteDiskWithSelectedFields_Operation() { + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect(computeRpcMock.deleteDisk(eq(DISK_ID.zone()), eq(DISK_ID.disk()), + capture(capturedOptions))).andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Operation operation = compute.delete(DISK_ID, OPERATION_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(OPERATION_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(zoneOperation, operation); + } + + @Test + public void testDeleteDisk_Null() { + EasyMock.expect(computeRpcMock.deleteDisk(DISK_ID.zone(), DISK_ID.disk(), EMPTY_RPC_OPTIONS)) + .andReturn(null); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertNull(compute.delete(DISK_ID)); + } + + @Test + public void testListDisks() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList diskList = ImmutableList.of( + new 
Disk(compute, new DiskInfo.BuilderImpl(DISK)), + new Disk(compute, new DiskInfo.BuilderImpl(DISK))); + Tuple> result = + Tuple.of(cursor, Iterables.transform(diskList, DiskInfo.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listDisks(DISK_ID.zone(), EMPTY_RPC_OPTIONS)) + .andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listDisks(DISK_ID.zone()); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(diskList.toArray(), Iterables.toArray(page.values(), Disk.class)); + } + + @Test + public void testListDisksNextPage() { + String cursor = "cursor"; + String nextCursor = "nextCursor"; + compute = options.service(); + ImmutableList diskList = ImmutableList.of( + new Disk(compute, new DiskInfo.BuilderImpl(DISK)), + new Disk(compute, new DiskInfo.BuilderImpl(DISK))); + ImmutableList nextDiskList = ImmutableList.of( + new Disk(compute, new DiskInfo.BuilderImpl(DISK))); + Tuple> result = + Tuple.of(cursor, Iterables.transform(diskList, DiskInfo.TO_PB_FUNCTION)); + Tuple> nextResult = + Tuple.of(nextCursor, Iterables.transform(nextDiskList, DiskInfo.TO_PB_FUNCTION)); + Map nextOptions = ImmutableMap.of(PAGE_TOKEN, cursor); + EasyMock.expect(computeRpcMock.listDisks(DISK_ID.zone(), EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.expect(computeRpcMock.listDisks(DISK_ID.zone(), nextOptions)).andReturn(nextResult); + EasyMock.replay(computeRpcMock); + Page page = compute.listDisks(DISK_ID.zone()); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(diskList.toArray(), Iterables.toArray(page.values(), Disk.class)); + page = page.nextPage(); + assertEquals(nextCursor, page.nextPageCursor()); + assertArrayEquals(nextDiskList.toArray(), Iterables.toArray(page.values(), Disk.class)); + } + + @Test + public void testListEmptyDisks() { + compute = options.service(); + ImmutableList disks = ImmutableList.of(); + Tuple> result = + Tuple.>of(null, disks); + EasyMock.expect(computeRpcMock.listDisks(DISK_ID.zone(), EMPTY_RPC_OPTIONS)) + .andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listDisks(DISK_ID.zone()); + assertNull(page.nextPageCursor()); + assertArrayEquals(disks.toArray(), Iterables.toArray(page.values(), Disk.class)); + } + + @Test + public void testListDisksWithOptions() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList diskList = ImmutableList.of( + new Disk(compute, new DiskInfo.BuilderImpl(DISK)), + new Disk(compute, new DiskInfo.BuilderImpl(DISK))); + Tuple> result = + Tuple.of(cursor, Iterables.transform(diskList, DiskInfo.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listDisks(DISK_ID.zone(), DISK_LIST_OPTIONS)) + .andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listDisks(DISK_ID.zone(), DISK_LIST_PAGE_SIZE, DISK_LIST_PAGE_TOKEN, + DISK_LIST_FILTER); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(diskList.toArray(), Iterables.toArray(page.values(), Disk.class)); + } + + @Test + public void testAggregatedListDisks() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList diskList = ImmutableList.of( + new Disk(compute, new DiskInfo.BuilderImpl(DISK)), + new Disk(compute, new DiskInfo.BuilderImpl(DISK))); + Tuple> result = + Tuple.of(cursor, Iterables.transform(diskList, DiskInfo.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listDisks(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listDisks(); + assertEquals(cursor, page.nextPageCursor()); + 
assertArrayEquals(diskList.toArray(), Iterables.toArray(page.values(), Disk.class)); + } + + @Test + public void testAggregatedListDisksNextPage() { + String cursor = "cursor"; + String nextCursor = "nextCursor"; + compute = options.service(); + ImmutableList diskList = ImmutableList.of( + new Disk(compute, new DiskInfo.BuilderImpl(DISK)), + new Disk(compute, new DiskInfo.BuilderImpl(DISK))); + ImmutableList nextDiskList = ImmutableList.of( + new Disk(compute, new DiskInfo.BuilderImpl(DISK))); + Tuple> result = + Tuple.of(cursor, Iterables.transform(diskList, DiskInfo.TO_PB_FUNCTION)); + Tuple> nextResult = + Tuple.of(nextCursor, Iterables.transform(nextDiskList, DiskInfo.TO_PB_FUNCTION)); + Map nextOptions = ImmutableMap.of(PAGE_TOKEN, cursor); + EasyMock.expect(computeRpcMock.listDisks(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.expect(computeRpcMock.listDisks(nextOptions)).andReturn(nextResult); + EasyMock.replay(computeRpcMock); + Page page = compute.listDisks(); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(diskList.toArray(), Iterables.toArray(page.values(), Disk.class)); + page = page.nextPage(); + assertEquals(nextCursor, page.nextPageCursor()); + assertArrayEquals(nextDiskList.toArray(), Iterables.toArray(page.values(), Disk.class)); + } + + @Test + public void testAggregatedListEmptyDisks() { + compute = options.service(); + ImmutableList diskList = ImmutableList.of(); + Tuple> result = + Tuple.>of(null, diskList); + EasyMock.expect(computeRpcMock.listDisks(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listDisks(); + assertNull(page.nextPageCursor()); + assertArrayEquals(diskList.toArray(), Iterables.toArray(page.values(), Disk.class)); + } + + @Test + public void testAggregatedListDisksWithOptions() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList diskList = ImmutableList.of( + new Disk(compute, new DiskInfo.BuilderImpl(DISK)), + new Disk(compute, new DiskInfo.BuilderImpl(DISK))); + Tuple> result = + Tuple.of(cursor, Iterables.transform(diskList, DiskInfo.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listDisks(DISK_LIST_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listDisks(DISK_AGGREGATED_LIST_PAGE_SIZE, + DISK_AGGREGATED_LIST_PAGE_TOKEN, DISK_AGGREGATED_LIST_FILTER); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(diskList.toArray(), Iterables.toArray(page.values(), Disk.class)); + } + + @Test + public void testCreateDisk() { + EasyMock.expect(computeRpcMock.createDisk(DISK_ID.zone(), DISK.toPb(), EMPTY_RPC_OPTIONS)) + .andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + DiskId diskId = DiskId.of("zone", "disk"); + DiskTypeId diskTypeId = DiskTypeId.of("zone", "diskType"); + DiskInfo disk = DISK.toBuilder() + .diskId(diskId) + .configuration(StandardDiskConfiguration.of(diskTypeId)) + .build(); + Operation operation = compute.create(disk); + assertEquals(zoneOperation, operation); + } + + @Test + public void testCreateDiskWithOptions() { + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect(computeRpcMock.createDisk(eq(DISK_ID.zone()), eq(DISK.toPb()), + capture(capturedOptions))).andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Operation operation = compute.create(DISK, OPERATION_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(OPERATION_OPTION_FIELDS.rpcOption()); + 
assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(zoneOperation, operation); + } + + @Test + public void testResizeDisk_Operation() { + EasyMock.expect(computeRpcMock.resizeDisk(DISK_ID.zone(), DISK_ID.disk(), 42L, + EMPTY_RPC_OPTIONS)).andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertEquals(zoneOperation, compute.resize(DISK_ID, 42L)); + } + + @Test + public void testResizeDiskWithSelectedFields_Operation() { + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect(computeRpcMock.resizeDisk(eq(DISK_ID.zone()), eq(DISK_ID.disk()), eq(42L), + capture(capturedOptions))).andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Operation operation = compute.resize(DISK_ID, 42L, OPERATION_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(OPERATION_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(zoneOperation, operation); + } + + @Test + public void testResizeDisk_Null() { + EasyMock.expect(computeRpcMock.resizeDisk(DISK_ID.zone(), DISK_ID.disk(), 42L, + EMPTY_RPC_OPTIONS)).andReturn(null); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertNull(compute.resize(DISK_ID, 42L)); + } + @Test public void testRetryableException() { EasyMock.expect( diff --git a/gcloud-java-compute/src/test/java/com/google/gcloud/compute/DiskTest.java b/gcloud-java-compute/src/test/java/com/google/gcloud/compute/DiskTest.java new file mode 100644 index 000000000000..8915a7894147 --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/gcloud/compute/DiskTest.java @@ -0,0 +1,475 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.gcloud.compute; + +import static org.easymock.EasyMock.createMock; +import static org.easymock.EasyMock.createStrictMock; +import static org.easymock.EasyMock.expect; +import static org.easymock.EasyMock.replay; +import static org.easymock.EasyMock.verify; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +import com.google.common.collect.ImmutableList; +import com.google.gcloud.compute.DiskInfo.CreationStatus; + +import org.junit.Test; + +import java.util.List; + +public class DiskTest { + + private static final String ID = "42"; + private static final DiskId DISK_ID = DiskId.of("project", "zone", "disk"); + private static final Long CREATION_TIMESTAMP = 1453293540000L; + private static final CreationStatus CREATION_STATUS = CreationStatus.READY; + private static final String DESCRIPTION = "description"; + private static final Long SIZE_GB = 500L; + private static final DiskTypeId TYPE = DiskTypeId.of("project", "zone", "disk"); + private static final List LICENSES = ImmutableList.of( + LicenseId.of("project", "license1"), LicenseId.of("project", "license2")); + private static final List ATTACHED_INSTANCES = ImmutableList.of( + InstanceId.of("project", "zone", "instance1"), + InstanceId.of("project", "zone", "instance2")); + private static final SnapshotId SNAPSHOT = SnapshotId.of("project", "snapshot"); + private static final ImageId IMAGE = ImageId.of("project", "image"); + private static final String SNAPSHOT_ID = "snapshotId"; + private static final String IMAGE_ID = "imageId"; + private static final Long LAST_ATTACH_TIMESTAMP = 1453293600000L; + private static final Long LAST_DETACH_TIMESTAMP = 1453293660000L; + private static final StandardDiskConfiguration DISK_CONFIGURATION = + StandardDiskConfiguration.builder() + .sizeGb(SIZE_GB) + .diskType(TYPE) + .build(); + private static final SnapshotDiskConfiguration SNAPSHOT_DISK_CONFIGURATION = + SnapshotDiskConfiguration.builder(SNAPSHOT) + .sizeGb(SIZE_GB) + .diskType(TYPE) + .sourceSnapshotId(SNAPSHOT_ID) + .build(); + private static final ImageDiskConfiguration IMAGE_DISK_CONFIGURATION = + ImageDiskConfiguration.builder(IMAGE) + .sizeGb(SIZE_GB) + .diskType(TYPE) + .sourceImageId(IMAGE_ID) + .build(); + + private final Compute serviceMockReturnsOptions = createStrictMock(Compute.class); + private final ComputeOptions mockOptions = createMock(ComputeOptions.class); + private Compute compute; + private Disk disk; + private Disk standardDisk; + private Disk snapshotDisk; + private Disk imageDisk; + + private void initializeExpectedDisk(int optionsCalls) { + expect(serviceMockReturnsOptions.options()).andReturn(mockOptions).times(optionsCalls); + replay(serviceMockReturnsOptions); + standardDisk = new Disk.Builder(serviceMockReturnsOptions, DISK_ID, DISK_CONFIGURATION) + .id(ID) + .creationTimestamp(CREATION_TIMESTAMP) + .creationStatus(CREATION_STATUS) + .description(DESCRIPTION) + .licenses(LICENSES) + .attachedInstances(ATTACHED_INSTANCES) + .lastAttachTimestamp(LAST_ATTACH_TIMESTAMP) + .lastDetachTimestamp(LAST_DETACH_TIMESTAMP) + .build(); + snapshotDisk = new Disk.Builder(serviceMockReturnsOptions, DISK_ID, SNAPSHOT_DISK_CONFIGURATION) + .id(ID) + .creationTimestamp(CREATION_TIMESTAMP) + .creationStatus(CREATION_STATUS) + .description(DESCRIPTION) + .licenses(LICENSES) + .attachedInstances(ATTACHED_INSTANCES) + 
.lastAttachTimestamp(LAST_ATTACH_TIMESTAMP) + .lastDetachTimestamp(LAST_DETACH_TIMESTAMP) + .build(); + imageDisk = new Disk.Builder(serviceMockReturnsOptions, DISK_ID, IMAGE_DISK_CONFIGURATION) + .id(ID) + .creationTimestamp(CREATION_TIMESTAMP) + .creationStatus(CREATION_STATUS) + .description(DESCRIPTION) + .licenses(LICENSES) + .attachedInstances(ATTACHED_INSTANCES) + .lastAttachTimestamp(LAST_ATTACH_TIMESTAMP) + .lastDetachTimestamp(LAST_DETACH_TIMESTAMP) + .build(); + compute = createStrictMock(Compute.class); + } + + private void initializeDisk() { + disk = new Disk.Builder(compute, DISK_ID, DISK_CONFIGURATION) + .id(ID) + .creationTimestamp(CREATION_TIMESTAMP) + .creationStatus(CREATION_STATUS) + .description(DESCRIPTION) + .licenses(LICENSES) + .attachedInstances(ATTACHED_INSTANCES) + .build(); + } + + @Test + public void testToBuilder() { + initializeExpectedDisk(16); + compareDisk(standardDisk, standardDisk.toBuilder().build()); + compareDisk(imageDisk, imageDisk.toBuilder().build()); + compareDisk(snapshotDisk, snapshotDisk.toBuilder().build()); + Disk newDisk = standardDisk.toBuilder().description("newDescription").build(); + assertEquals("newDescription", newDisk.description()); + newDisk = newDisk.toBuilder().description("description").build(); + compareDisk(standardDisk, newDisk); + } + + @Test + public void testToBuilderIncomplete() { + initializeExpectedDisk(18); + DiskInfo diskInfo = DiskInfo.of(DISK_ID, DISK_CONFIGURATION); + Disk disk = new Disk(serviceMockReturnsOptions, new DiskInfo.BuilderImpl(diskInfo)); + compareDisk(disk, disk.toBuilder().build()); + diskInfo = DiskInfo.of(DISK_ID, SNAPSHOT_DISK_CONFIGURATION); + disk = new Disk(serviceMockReturnsOptions, new DiskInfo.BuilderImpl(diskInfo)); + compareDisk(disk, disk.toBuilder().build()); + diskInfo = DiskInfo.of(DISK_ID, IMAGE_DISK_CONFIGURATION); + disk = new Disk(serviceMockReturnsOptions, new DiskInfo.BuilderImpl(diskInfo)); + compareDisk(disk, disk.toBuilder().build()); + } + + @Test + public void testBuilder() { + initializeExpectedDisk(4); + assertEquals(DISK_ID, standardDisk.diskId()); + assertEquals(ID, standardDisk.id()); + assertEquals(DISK_CONFIGURATION, standardDisk.configuration()); + assertEquals(CREATION_TIMESTAMP, standardDisk.creationTimestamp()); + assertEquals(CREATION_STATUS, standardDisk.creationStatus()); + assertEquals(DESCRIPTION, standardDisk.description()); + assertEquals(LICENSES, standardDisk.licenses()); + assertEquals(ATTACHED_INSTANCES, standardDisk.attachedInstances()); + assertEquals(LAST_ATTACH_TIMESTAMP, standardDisk.lastAttachTimestamp()); + assertEquals(LAST_DETACH_TIMESTAMP, standardDisk.lastDetachTimestamp()); + assertSame(serviceMockReturnsOptions, standardDisk.compute()); + assertEquals(DISK_ID, imageDisk.diskId()); + assertEquals(ID, imageDisk.id()); + assertEquals(IMAGE_DISK_CONFIGURATION, imageDisk.configuration()); + assertEquals(CREATION_TIMESTAMP, imageDisk.creationTimestamp()); + assertEquals(CREATION_STATUS, imageDisk.creationStatus()); + assertEquals(DESCRIPTION, imageDisk.description()); + assertEquals(LICENSES, imageDisk.licenses()); + assertEquals(ATTACHED_INSTANCES, imageDisk.attachedInstances()); + assertEquals(LAST_ATTACH_TIMESTAMP, imageDisk.lastAttachTimestamp()); + assertEquals(LAST_DETACH_TIMESTAMP, imageDisk.lastDetachTimestamp()); + assertSame(serviceMockReturnsOptions, imageDisk.compute()); + assertEquals(DISK_ID, snapshotDisk.diskId()); + assertEquals(ID, snapshotDisk.id()); + assertEquals(SNAPSHOT_DISK_CONFIGURATION, snapshotDisk.configuration()); + 
assertEquals(CREATION_TIMESTAMP, snapshotDisk.creationTimestamp()); + assertEquals(CREATION_STATUS, snapshotDisk.creationStatus()); + assertEquals(DESCRIPTION, snapshotDisk.description()); + assertEquals(LICENSES, snapshotDisk.licenses()); + assertEquals(ATTACHED_INSTANCES, snapshotDisk.attachedInstances()); + assertEquals(LAST_ATTACH_TIMESTAMP, snapshotDisk.lastAttachTimestamp()); + assertEquals(LAST_DETACH_TIMESTAMP, snapshotDisk.lastDetachTimestamp()); + assertSame(serviceMockReturnsOptions, snapshotDisk.compute()); + Disk disk = new Disk.Builder(serviceMockReturnsOptions, DISK_ID, DISK_CONFIGURATION) + .diskId(DiskId.of("newProject", "newZone")) + .configuration(SNAPSHOT_DISK_CONFIGURATION) + .build(); + assertEquals(DiskId.of("newProject", "newZone"), disk.diskId()); + assertNull(disk.id()); + assertEquals(SNAPSHOT_DISK_CONFIGURATION, disk.configuration()); + assertNull(disk.creationTimestamp()); + assertNull(disk.creationStatus()); + assertNull(disk.description()); + assertNull(disk.licenses()); + assertNull(disk.attachedInstances()); + assertNull(disk.lastAttachTimestamp()); + assertNull(disk.lastDetachTimestamp()); + assertSame(serviceMockReturnsOptions, disk.compute()); + } + + @Test + public void testToAndFromPb() { + initializeExpectedDisk(24); + compareDisk(standardDisk, Disk.fromPb(serviceMockReturnsOptions, standardDisk.toPb())); + compareDisk(imageDisk, Disk.fromPb(serviceMockReturnsOptions, imageDisk.toPb())); + compareDisk(snapshotDisk, Disk.fromPb(serviceMockReturnsOptions, snapshotDisk.toPb())); + Disk disk = new Disk.Builder(serviceMockReturnsOptions, DISK_ID, DISK_CONFIGURATION).build(); + compareDisk(disk, Disk.fromPb(serviceMockReturnsOptions, disk.toPb())); + disk = + new Disk.Builder(serviceMockReturnsOptions, DISK_ID, SNAPSHOT_DISK_CONFIGURATION).build(); + compareDisk(disk, Disk.fromPb(serviceMockReturnsOptions, disk.toPb())); + disk = new Disk.Builder(serviceMockReturnsOptions, DISK_ID, IMAGE_DISK_CONFIGURATION).build(); + compareDisk(disk, Disk.fromPb(serviceMockReturnsOptions, disk.toPb())); + } + + @Test + public void testDeleteOperation() { + initializeExpectedDisk(4); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(ZoneOperationId.of("project", "zone", "op")) + .build(); + expect(compute.delete(DISK_ID)).andReturn(operation); + replay(compute); + initializeDisk(); + assertSame(operation, disk.delete()); + } + + @Test + public void testDeleteNull() { + initializeExpectedDisk(3); + expect(compute.options()).andReturn(mockOptions); + expect(compute.delete(DISK_ID)).andReturn(null); + replay(compute); + initializeDisk(); + assertNull(disk.delete()); + } + + @Test + public void testExists_True() throws Exception { + initializeExpectedDisk(3); + Compute.DiskOption[] expectedOptions = {Compute.DiskOption.fields()}; + expect(compute.options()).andReturn(mockOptions); + expect(compute.get(DISK_ID, expectedOptions)).andReturn(imageDisk); + replay(compute); + initializeDisk(); + assertTrue(disk.exists()); + verify(compute); + } + + @Test + public void testExists_False() throws Exception { + initializeExpectedDisk(3); + Compute.DiskOption[] expectedOptions = {Compute.DiskOption.fields()}; + expect(compute.options()).andReturn(mockOptions); + expect(compute.get(DISK_ID, expectedOptions)).andReturn(null); + replay(compute); + initializeDisk(); + assertFalse(disk.exists()); + verify(compute); + } + + @Test + public void testReload() throws Exception { + initializeExpectedDisk(5); + 
expect(compute.options()).andReturn(mockOptions); + expect(compute.get(DISK_ID)).andReturn(imageDisk); + replay(compute); + initializeDisk(); + Disk updatedDisk = disk.reload(); + compareDisk(imageDisk, updatedDisk); + verify(compute); + } + + @Test + public void testReloadNull() throws Exception { + initializeExpectedDisk(3); + expect(compute.options()).andReturn(mockOptions); + expect(compute.get(DISK_ID)).andReturn(null); + replay(compute); + initializeDisk(); + assertNull(disk.reload()); + verify(compute); + } + + @Test + public void testReloadWithOptions() throws Exception { + initializeExpectedDisk(5); + expect(compute.options()).andReturn(mockOptions); + expect(compute.get(DISK_ID, Compute.DiskOption.fields())).andReturn(imageDisk); + replay(compute); + initializeDisk(); + Disk updatedDisk = disk.reload(Compute.DiskOption.fields()); + compareDisk(imageDisk, updatedDisk); + verify(compute); + } + + @Test + public void testCreateSnapshot() { + initializeExpectedDisk(4); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(ZoneOperationId.of("project", "zone", "op")) + .build(); + SnapshotId snapshotId = SnapshotId.of(SNAPSHOT.snapshot()); + SnapshotInfo snapshot = SnapshotInfo.builder(snapshotId, DISK_ID).build(); + expect(compute.create(snapshot)).andReturn(operation); + replay(compute); + initializeDisk(); + assertSame(operation, disk.createSnapshot(SNAPSHOT.snapshot())); + } + + @Test + public void testCreateSnapshotWithDescription() { + initializeExpectedDisk(4); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(ZoneOperationId.of("project", "zone", "op")) + .build(); + SnapshotId snapshotId = SnapshotId.of(SNAPSHOT.snapshot()); + SnapshotInfo snapshot = SnapshotInfo.builder(snapshotId, DISK_ID) + .description("description") + .build(); + expect(compute.create(snapshot)).andReturn(operation); + replay(compute); + initializeDisk(); + assertSame(operation, disk.createSnapshot(SNAPSHOT.snapshot(), "description")); + } + + @Test + public void testCreateSnapshotWithOptions() { + initializeExpectedDisk(4); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(ZoneOperationId.of("project", "zone", "op")) + .build(); + SnapshotId snapshotId = SnapshotId.of(SNAPSHOT.snapshot()); + SnapshotInfo snapshot = SnapshotInfo.builder(snapshotId, DISK_ID).build(); + expect(compute.create(snapshot, Compute.OperationOption.fields())).andReturn(operation); + replay(compute); + initializeDisk(); + assertSame(operation, + disk.createSnapshot(SNAPSHOT.snapshot(), Compute.OperationOption.fields())); + } + + @Test + public void testCreateSnapshotWithDescriptionAndOptions() { + initializeExpectedDisk(4); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(ZoneOperationId.of("project", "zone", "op")) + .build(); + SnapshotId snapshotId = SnapshotId.of(SNAPSHOT.snapshot()); + SnapshotInfo snapshot = SnapshotInfo.builder(snapshotId, DISK_ID) + .description("description") + .build(); + expect(compute.create(snapshot, Compute.OperationOption.fields())).andReturn(operation); + replay(compute); + initializeDisk(); + assertSame(operation, + disk.createSnapshot(SNAPSHOT.snapshot(), "description", Compute.OperationOption.fields())); + } + + @Test + public void 
testCreateImage() { + initializeExpectedDisk(4); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(GlobalOperationId.of("project", "op")) + .build(); + ImageId imageId = ImageId.of(IMAGE.image()); + ImageInfo image = ImageInfo.of(imageId, DiskImageConfiguration.of(DISK_ID)); + expect(compute.create(image)).andReturn(operation); + replay(compute); + initializeDisk(); + assertSame(operation, disk.createImage(IMAGE.image())); + } + + @Test + public void testCreateImageWithDescription() { + initializeExpectedDisk(4); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(GlobalOperationId.of("project", "op")) + .build(); + ImageId imageId = ImageId.of(IMAGE.image()); + ImageInfo image = ImageInfo.builder(imageId, DiskImageConfiguration.of(DISK_ID)) + .description("description") + .build(); + expect(compute.create(image)).andReturn(operation); + replay(compute); + initializeDisk(); + assertSame(operation, disk.createImage(IMAGE.image(), "description")); + } + + @Test + public void testCreateImageWithOptions() { + initializeExpectedDisk(4); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(GlobalOperationId.of("project", "op")) + .build(); + ImageId imageId = ImageId.of(IMAGE.image()); + ImageInfo image = ImageInfo.of(imageId, DiskImageConfiguration.of(DISK_ID)); + expect(compute.create(image, Compute.OperationOption.fields())).andReturn(operation); + replay(compute); + initializeDisk(); + assertSame(operation, disk.createImage(IMAGE.image(), Compute.OperationOption.fields())); + } + + @Test + public void testCreateImageWithDescriptionAndOptions() { + initializeExpectedDisk(4); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(GlobalOperationId.of("project", "op")) + .build(); + ImageId imageId = ImageId.of(IMAGE.image()); + ImageInfo image = ImageInfo.builder(imageId, DiskImageConfiguration.of(DISK_ID)) + .description("description") + .build(); + expect(compute.create(image, Compute.OperationOption.fields())).andReturn(operation); + replay(compute); + initializeDisk(); + assertSame(operation, + disk.createImage(IMAGE.image(), "description", Compute.OperationOption.fields())); + } + + @Test + public void testResizeOperation() { + initializeExpectedDisk(4); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(ZoneOperationId.of("project", "zone", "op")) + .build(); + expect(compute.resize(DISK_ID, 42L)).andReturn(operation); + replay(compute); + initializeDisk(); + assertSame(operation, disk.resize(42L)); + } + + @Test + public void testResizeNull() { + initializeExpectedDisk(3); + expect(compute.options()).andReturn(mockOptions); + expect(compute.resize(DISK_ID, 42L)).andReturn(null); + replay(compute); + initializeDisk(); + assertNull(disk.resize(42L)); + } + + public void compareDisk(Disk expected, Disk value) { + assertEquals(expected, value); + assertEquals(expected.compute().options(), value.compute().options()); + assertEquals(expected.diskId(), value.diskId()); + assertEquals(expected.configuration(), value.configuration()); + assertEquals(expected.id(), value.id()); + assertEquals(expected.creationTimestamp(), value.creationTimestamp()); + 
assertEquals(expected.creationStatus(), value.creationStatus()); + assertEquals(expected.description(), value.description()); + assertEquals(expected.licenses(), value.licenses()); + assertEquals(expected.attachedInstances(), value.attachedInstances()); + assertEquals(expected.lastAttachTimestamp(), value.lastAttachTimestamp()); + assertEquals(expected.lastDetachTimestamp(), value.lastDetachTimestamp()); + assertEquals(expected.hashCode(), value.hashCode()); + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/gcloud/compute/SerializationTest.java b/gcloud-java-compute/src/test/java/com/google/gcloud/compute/SerializationTest.java index 57736854091f..bec8844ae111 100644 --- a/gcloud-java-compute/src/test/java/com/google/gcloud/compute/SerializationTest.java +++ b/gcloud-java-compute/src/test/java/com/google/gcloud/compute/SerializationTest.java @@ -22,7 +22,6 @@ import com.google.common.collect.ImmutableList; import com.google.gcloud.AuthCredentials; import com.google.gcloud.RetryParams; -import com.google.gcloud.compute.Zone.MaintenanceWindow; import org.junit.Test; @@ -89,20 +88,12 @@ public class SerializationTest { .build(); private static final ZoneId ZONE_ID = ZoneId.of("project", "zone"); private static final Zone.Status ZONE_STATUS = Zone.Status.DOWN; - private static final Long BEGIN_TIME = 1453293420000L; - private static final Long END_TIME = 1453293480000L; - private static final MaintenanceWindow WINDOW1 = new MaintenanceWindow("NAME1", "DESCRIPTION1", - BEGIN_TIME, END_TIME); - private static final MaintenanceWindow WINDOW2 = new MaintenanceWindow("NAME2", "DESCRIPTION2", - BEGIN_TIME, END_TIME); - private static final List WINDOWS = ImmutableList.of(WINDOW1, WINDOW2); private static final Zone ZONE = Zone.builder() .zoneId(ZONE_ID) .id(ID) .creationTimestamp(CREATION_TIMESTAMP) .description(DESCRIPTION) .status(ZONE_STATUS) - .maintenanceWindows(WINDOWS) .region(REGION_ID) .build(); private static final DeprecationStatus DEPRECATION_STATUS = @@ -163,6 +154,8 @@ public class SerializationTest { private static final SnapshotDiskConfiguration SNAPSHOT_DISK_CONFIGURATION = SnapshotDiskConfiguration.of(SNAPSHOT_ID); private static final DiskInfo DISK_INFO = DiskInfo.of(DISK_ID, STANDARD_DISK_CONFIGURATION); + private static final Disk DISK = + new Disk.Builder(COMPUTE, DISK_ID, STANDARD_DISK_CONFIGURATION).build(); private static final Compute.DiskTypeOption DISK_TYPE_OPTION = Compute.DiskTypeOption.fields(); private static final Compute.DiskTypeFilter DISK_TYPE_FILTER = @@ -212,6 +205,13 @@ public class SerializationTest { Compute.ImageFilter.equals(Compute.ImageField.SELF_LINK, "selfLink"); private static final Compute.ImageListOption IMAGE_LIST_OPTION = Compute.ImageListOption.filter(IMAGE_FILTER); + private static final Compute.DiskOption DISK_OPTION = Compute.DiskOption.fields(); + private static final Compute.DiskFilter DISK_FILTER = + Compute.DiskFilter.equals(Compute.DiskField.SELF_LINK, "selfLink"); + private static final Compute.DiskListOption DISK_LIST_OPTION = + Compute.DiskListOption.filter(DISK_FILTER); + private static final Compute.DiskAggregatedListOption DISK_AGGREGATED_LIST_OPTION = + Compute.DiskAggregatedListOption.filter(DISK_FILTER); @Test public void testServiceOptions() throws Exception { @@ -239,15 +239,16 @@ public void testModelAndRequests() throws Exception { INSTANCE_ID, REGION_FORWARDING_RULE_ID, GLOBAL_FORWARDING_RULE_ID, GLOBAL_ADDRESS_ID, REGION_ADDRESS_ID, INSTANCE_USAGE, GLOBAL_FORWARDING_USAGE, REGION_FORWARDING_USAGE, 
ADDRESS_INFO, ADDRESS, DISK_ID, SNAPSHOT_ID, SNAPSHOT_INFO, SNAPSHOT, IMAGE_ID, - DISK_IMAGE_CONFIGURATION, STORAGE_IMAGE_CONFIGURATION, IMAGE_INFO, + DISK_IMAGE_CONFIGURATION, STORAGE_IMAGE_CONFIGURATION, IMAGE_INFO, IMAGE, STANDARD_DISK_CONFIGURATION, IMAGE_DISK_CONFIGURATION, SNAPSHOT_DISK_CONFIGURATION, - DISK_INFO, IMAGE, DISK_TYPE_OPTION, DISK_TYPE_FILTER, DISK_TYPE_LIST_OPTION, + DISK_INFO, DISK, DISK_TYPE_OPTION, DISK_TYPE_FILTER, DISK_TYPE_LIST_OPTION, DISK_TYPE_AGGREGATED_LIST_OPTION, MACHINE_TYPE_OPTION, MACHINE_TYPE_FILTER, MACHINE_TYPE_LIST_OPTION, MACHINE_TYPE_AGGREGATED_LIST_OPTION, REGION_OPTION, REGION_FILTER, REGION_LIST_OPTION, ZONE_OPTION, ZONE_FILTER, ZONE_LIST_OPTION, LICENSE_OPTION, OPERATION_OPTION, OPERATION_FILTER, OPERATION_LIST_OPTION, ADDRESS_OPTION, ADDRESS_FILTER, ADDRESS_LIST_OPTION, ADDRESS_AGGREGATED_LIST_OPTION, SNAPSHOT_OPTION, SNAPSHOT_FILTER, - SNAPSHOT_LIST_OPTION, IMAGE_OPTION, IMAGE_FILTER, IMAGE_LIST_OPTION}; + SNAPSHOT_LIST_OPTION, IMAGE_OPTION, IMAGE_FILTER, IMAGE_LIST_OPTION, DISK_OPTION, + DISK_FILTER, DISK_LIST_OPTION, DISK_AGGREGATED_LIST_OPTION}; for (Serializable obj : objects) { Object copy = serializeAndDeserialize(obj); assertEquals(obj, obj); diff --git a/gcloud-java-compute/src/test/java/com/google/gcloud/compute/ZoneTest.java b/gcloud-java-compute/src/test/java/com/google/gcloud/compute/ZoneTest.java index 1224c607a567..97a80a4de05b 100644 --- a/gcloud-java-compute/src/test/java/com/google/gcloud/compute/ZoneTest.java +++ b/gcloud-java-compute/src/test/java/com/google/gcloud/compute/ZoneTest.java @@ -18,13 +18,8 @@ import static org.junit.Assert.assertEquals; -import com.google.common.collect.ImmutableList; -import com.google.gcloud.compute.Zone.MaintenanceWindow; - import org.junit.Test; -import java.util.List; - public class ZoneTest { private static final ZoneId ZONE_ID = ZoneId.of("project", "zone"); @@ -33,13 +28,6 @@ public class ZoneTest { private static final Long CREATION_TIMESTAMP = 1453293540000L; private static final String DESCRIPTION = "description"; private static final Zone.Status STATUS = Zone.Status.DOWN; - private static final Long BEGIN_TIME = 1453293420000L; - private static final Long END_TIME = 1453293480000L; - private static final MaintenanceWindow WINDOW1 = new MaintenanceWindow("NAME1", "DESCRIPTION1", - BEGIN_TIME, END_TIME); - private static final MaintenanceWindow WINDOW2 = new MaintenanceWindow("NAME2", "DESCRIPTION2", - BEGIN_TIME, END_TIME); - private static final List WINDOWS = ImmutableList.of(WINDOW1, WINDOW2); private static final DeprecationStatus DEPRECATION_STATUS = DeprecationStatus.of(DeprecationStatus.Status.DELETED, ZONE_ID); private static final Zone ZONE = Zone.builder() @@ -48,7 +36,6 @@ public class ZoneTest { .creationTimestamp(CREATION_TIMESTAMP) .description(DESCRIPTION) .status(STATUS) - .maintenanceWindows(WINDOWS) .deprecationStatus(DEPRECATION_STATUS) .region(REGION_ID) .build(); @@ -60,7 +47,6 @@ public void testBuilder() { assertEquals(CREATION_TIMESTAMP, ZONE.creationTimestamp()); assertEquals(DESCRIPTION, ZONE.description()); assertEquals(STATUS, ZONE.status()); - assertEquals(WINDOWS, ZONE.maintenanceWindows()); assertEquals(REGION_ID, ZONE.region()); assertEquals(DEPRECATION_STATUS, ZONE.deprecationStatus()); } @@ -84,7 +70,6 @@ private void compareZones(Zone expected, Zone value) { assertEquals(expected.creationTimestamp(), value.creationTimestamp()); assertEquals(expected.description(), value.description()); assertEquals(expected.status(), value.status()); - 
assertEquals(expected.maintenanceWindows(), value.maintenanceWindows()); assertEquals(expected.region(), value.region()); assertEquals(expected.deprecationStatus(), value.deprecationStatus()); assertEquals(expected.hashCode(), value.hashCode()); diff --git a/gcloud-java-compute/src/test/java/com/google/gcloud/compute/it/ITComputeTest.java b/gcloud-java-compute/src/test/java/com/google/gcloud/compute/it/ITComputeTest.java index 666fe8500704..a2f472cdcbb8 100644 --- a/gcloud-java-compute/src/test/java/com/google/gcloud/compute/it/ITComputeTest.java +++ b/gcloud-java-compute/src/test/java/com/google/gcloud/compute/it/ITComputeTest.java @@ -29,8 +29,20 @@ import com.google.gcloud.compute.AddressId; import com.google.gcloud.compute.AddressInfo; import com.google.gcloud.compute.Compute; +import com.google.gcloud.compute.DeprecationStatus; +import com.google.gcloud.compute.Disk; +import com.google.gcloud.compute.DiskConfiguration; +import com.google.gcloud.compute.DiskId; +import com.google.gcloud.compute.DiskImageConfiguration; +import com.google.gcloud.compute.DiskInfo; import com.google.gcloud.compute.DiskType; +import com.google.gcloud.compute.DiskTypeId; import com.google.gcloud.compute.GlobalAddressId; +import com.google.gcloud.compute.Image; +import com.google.gcloud.compute.ImageConfiguration; +import com.google.gcloud.compute.ImageDiskConfiguration; +import com.google.gcloud.compute.ImageId; +import com.google.gcloud.compute.ImageInfo; import com.google.gcloud.compute.License; import com.google.gcloud.compute.LicenseId; import com.google.gcloud.compute.MachineType; @@ -38,6 +50,12 @@ import com.google.gcloud.compute.Region; import com.google.gcloud.compute.RegionAddressId; import com.google.gcloud.compute.RegionOperationId; +import com.google.gcloud.compute.Snapshot; +import com.google.gcloud.compute.SnapshotDiskConfiguration; +import com.google.gcloud.compute.SnapshotId; +import com.google.gcloud.compute.SnapshotInfo; +import com.google.gcloud.compute.StandardDiskConfiguration; +import com.google.gcloud.compute.StorageImageConfiguration; import com.google.gcloud.compute.Zone; import com.google.gcloud.compute.ZoneOperationId; import com.google.gcloud.compute.testing.RemoteComputeHelper; @@ -58,6 +76,8 @@ public class ITComputeTest { private static final String MACHINE_TYPE = "f1-micro"; private static final LicenseId LICENSE_ID = LicenseId.of("ubuntu-os-cloud", "ubuntu-1404-trusty"); private static final String BASE_RESOURCE_NAME = RemoteComputeHelper.baseResourceName(); + private static final ImageId IMAGE_ID = ImageId.of("debian-cloud", "debian-8-jessie-v20160219"); + private static final String IMAGE_PROJECT = "debian-cloud"; private static Compute compute; @@ -411,7 +431,6 @@ public void testGetZoneWithSelectedFields() { assertNull(zone.creationTimestamp()); assertNull(zone.description()); assertNull(zone.status()); - assertNull(zone.maintenanceWindows()); assertNull(zone.region()); } @@ -855,4 +874,486 @@ public void testListGlobalAddresses() throws InterruptedException { compute.delete(firstAddressId); compute.delete(secondAddressId); } + + @Test + public void testCreateGetResizeAndDeleteStandardDisk() throws InterruptedException { + String name = BASE_RESOURCE_NAME + "create-and-get-standard-disk"; + DiskId diskId = DiskId.of(ZONE, name); + DiskInfo diskInfo = + DiskInfo.of(diskId, StandardDiskConfiguration.of(DiskTypeId.of(ZONE, "pd-ssd"), 100L)); + Operation operation = compute.create(diskInfo); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + // test get + Disk 
remoteDisk = compute.get(diskId); + assertNotNull(remoteDisk); + assertEquals(ZONE, remoteDisk.diskId().zone()); + assertEquals(diskId.disk(), remoteDisk.diskId().disk()); + assertNotNull(remoteDisk.creationTimestamp()); + assertNotNull(remoteDisk.id()); + assertTrue(remoteDisk.configuration() instanceof StandardDiskConfiguration); + StandardDiskConfiguration remoteConfiguration = remoteDisk.configuration(); + assertEquals(100L, (long) remoteConfiguration.sizeGb()); + assertEquals("pd-ssd", remoteConfiguration.diskType().diskType()); + assertEquals(DiskConfiguration.Type.STANDARD, remoteConfiguration.type()); + assertNull(remoteDisk.lastAttachTimestamp()); + assertNull(remoteDisk.lastDetachTimestamp()); + operation = remoteDisk.resize(200L); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + // test resize and get with selected fields + remoteDisk = compute.get(diskId, Compute.DiskOption.fields(Compute.DiskField.SIZE_GB)); + assertNotNull(remoteDisk); + assertEquals(ZONE, remoteDisk.diskId().zone()); + assertEquals(diskId.disk(), remoteDisk.diskId().disk()); + assertNull(remoteDisk.creationTimestamp()); + assertNull(remoteDisk.id()); + assertTrue(remoteDisk.configuration() instanceof StandardDiskConfiguration); + remoteConfiguration = remoteDisk.configuration(); + assertEquals(200L, (long) remoteConfiguration.sizeGb()); + assertEquals("pd-ssd", remoteConfiguration.diskType().diskType()); + assertEquals(DiskConfiguration.Type.STANDARD, remoteConfiguration.type()); + assertNull(remoteDisk.lastAttachTimestamp()); + assertNull(remoteDisk.lastDetachTimestamp()); + operation = remoteDisk.delete(); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + assertNull(compute.get(diskId)); + } + + @Test + public void testCreateGetAndDeleteImageDisk() throws InterruptedException { + String name = BASE_RESOURCE_NAME + "create-and-get-image-disk"; + DiskId diskId = DiskId.of(ZONE, name); + DiskInfo diskInfo = DiskInfo.of(diskId, ImageDiskConfiguration.of(IMAGE_ID)); + Operation operation = compute.create(diskInfo); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + // test get + Disk remoteDisk = compute.get(diskId); + assertNotNull(remoteDisk); + assertEquals(ZONE, remoteDisk.diskId().zone()); + assertEquals(diskId.disk(), remoteDisk.diskId().disk()); + assertEquals(DiskInfo.CreationStatus.READY, remoteDisk.creationStatus()); + assertNotNull(remoteDisk.creationTimestamp()); + assertNotNull(remoteDisk.id()); + assertTrue(remoteDisk.configuration() instanceof ImageDiskConfiguration); + ImageDiskConfiguration remoteConfiguration = remoteDisk.configuration(); + assertEquals(IMAGE_ID, remoteConfiguration.sourceImage()); + assertNotNull(remoteConfiguration.sourceImageId()); + assertEquals(DiskConfiguration.Type.IMAGE, remoteConfiguration.type()); + assertNotNull(remoteConfiguration.sizeGb()); + assertEquals("pd-standard", remoteConfiguration.diskType().diskType()); + assertNull(remoteDisk.lastAttachTimestamp()); + assertNull(remoteDisk.lastDetachTimestamp()); + // test get with selected fields + remoteDisk = compute.get(diskId, Compute.DiskOption.fields()); + assertNotNull(remoteDisk); + assertEquals(ZONE, remoteDisk.diskId().zone()); + assertEquals(diskId.disk(), remoteDisk.diskId().disk()); + assertNull(remoteDisk.creationTimestamp()); + assertNull(remoteDisk.id()); + assertTrue(remoteDisk.configuration() instanceof ImageDiskConfiguration); + remoteConfiguration = remoteDisk.configuration(); + assertEquals(IMAGE_ID, remoteConfiguration.sourceImage()); + 
assertNull(remoteConfiguration.sourceImageId()); + assertEquals(DiskConfiguration.Type.IMAGE, remoteConfiguration.type()); + assertNull(remoteConfiguration.sizeGb()); + assertEquals("pd-standard", remoteConfiguration.diskType().diskType()); + assertNull(remoteDisk.lastAttachTimestamp()); + assertNull(remoteDisk.lastDetachTimestamp()); + operation = remoteDisk.delete(); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + assertNull(compute.get(diskId)); + } + + @Test + public void testCreateGetAndDeleteSnapshotAndSnapshotDisk() throws InterruptedException { + String diskName = BASE_RESOURCE_NAME + "create-and-get-snapshot-disk1"; + String snapshotDiskName = BASE_RESOURCE_NAME + "create-and-get-snapshot-disk2"; + DiskId diskId = DiskId.of(ZONE, diskName); + DiskId snapshotDiskId = DiskId.of(ZONE, snapshotDiskName); + String snapshotName = BASE_RESOURCE_NAME + "create-and-get-snapshot"; + DiskInfo diskInfo = + DiskInfo.of(diskId, StandardDiskConfiguration.of(DiskTypeId.of(ZONE, "pd-ssd"), 100L)); + Operation operation = compute.create(diskInfo); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + Disk remoteDisk = compute.get(diskId); + operation = remoteDisk.createSnapshot(snapshotName); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + // test get snapshot with selected fields + Snapshot snapshot = compute.getSnapshot(snapshotName, + Compute.SnapshotOption.fields(Compute.SnapshotField.CREATION_TIMESTAMP)); + assertNull(snapshot.id()); + assertNotNull(snapshot.snapshotId()); + assertNotNull(snapshot.creationTimestamp()); + assertNull(snapshot.description()); + assertNull(snapshot.status()); + assertNull(snapshot.diskSizeGb()); + assertNull(snapshot.licenses()); + assertNull(snapshot.sourceDisk()); + assertNull(snapshot.sourceDiskId()); + assertNull(snapshot.storageBytes()); + assertNull(snapshot.storageBytesStatus()); + // test get snapshot + snapshot = compute.getSnapshot(snapshotName); + assertNotNull(snapshot.id()); + assertNotNull(snapshot.snapshotId()); + assertNotNull(snapshot.creationTimestamp()); + assertNotNull(snapshot.status()); + assertEquals(100L, (long) snapshot.diskSizeGb()); + assertEquals(diskName, snapshot.sourceDisk().disk()); + assertNotNull(snapshot.sourceDiskId()); + assertNotNull(snapshot.storageBytes()); + assertNotNull(snapshot.storageBytesStatus()); + remoteDisk.delete(); + diskInfo = + DiskInfo.of(snapshotDiskId, SnapshotDiskConfiguration.of(SnapshotId.of(snapshotName))); + operation = compute.create(diskInfo); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + // test get disk + remoteDisk = compute.get(snapshotDiskId); + assertNotNull(remoteDisk); + assertEquals(ZONE, remoteDisk.diskId().zone()); + assertEquals(snapshotDiskId.disk(), remoteDisk.diskId().disk()); + assertEquals(DiskInfo.CreationStatus.READY, remoteDisk.creationStatus()); + assertNotNull(remoteDisk.creationTimestamp()); + assertNotNull(remoteDisk.id()); + assertTrue(remoteDisk.configuration() instanceof SnapshotDiskConfiguration); + SnapshotDiskConfiguration remoteConfiguration = remoteDisk.configuration(); + assertEquals(DiskConfiguration.Type.SNAPSHOT, remoteConfiguration.type()); + assertEquals(snapshotName, remoteConfiguration.sourceSnapshot().snapshot()); + assertEquals(100L, (long) remoteConfiguration.sizeGb()); + assertEquals("pd-standard", remoteConfiguration.diskType().diskType()); + assertNotNull(remoteConfiguration.sourceSnapshotId()); + assertNull(remoteDisk.lastAttachTimestamp()); + assertNull(remoteDisk.lastDetachTimestamp()); + // test get disk 
with selected fields + remoteDisk = compute.get(snapshotDiskId, Compute.DiskOption.fields()); + assertNotNull(remoteDisk); + assertEquals(ZONE, remoteDisk.diskId().zone()); + assertEquals(snapshotDiskId.disk(), remoteDisk.diskId().disk()); + assertNull(remoteDisk.creationStatus()); + assertNull(remoteDisk.creationTimestamp()); + assertNull(remoteDisk.id()); + assertTrue(remoteDisk.configuration() instanceof SnapshotDiskConfiguration); + remoteConfiguration = remoteDisk.configuration(); + assertEquals(DiskConfiguration.Type.SNAPSHOT, remoteConfiguration.type()); + assertEquals(snapshotName, remoteConfiguration.sourceSnapshot().snapshot()); + assertNull(remoteConfiguration.sizeGb()); + assertEquals("pd-standard", remoteConfiguration.diskType().diskType()); + assertNull(remoteDisk.<SnapshotDiskConfiguration>configuration().sourceSnapshotId()); + assertNull(remoteDisk.lastAttachTimestamp()); + assertNull(remoteDisk.lastDetachTimestamp()); + operation = remoteDisk.delete(); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + assertNull(compute.get(snapshotDiskId)); + operation = snapshot.delete(); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + assertNull(compute.getSnapshot(snapshotName)); + } + + @Test + public void testListDisksAndSnapshots() throws InterruptedException { + String prefix = BASE_RESOURCE_NAME + "list-disks-and-snapshots-disk"; + String[] diskNames = {prefix + "1", prefix + "2"}; + DiskId firstDiskId = DiskId.of(ZONE, diskNames[0]); + DiskId secondDiskId = DiskId.of(ZONE, diskNames[1]); + DiskConfiguration configuration = + StandardDiskConfiguration.of(DiskTypeId.of(ZONE, "pd-ssd"), 100L); + Operation firstOperation = compute.create(DiskInfo.of(firstDiskId, configuration)); + Operation secondOperation = compute.create(DiskInfo.of(secondDiskId, configuration)); + while (!firstOperation.isDone()) { + Thread.sleep(1000L); + } + while (!secondOperation.isDone()) { + Thread.sleep(1000L); + } + Set<String> diskSet = ImmutableSet.copyOf(diskNames); + // test list disks + Compute.DiskFilter diskFilter = + Compute.DiskFilter.equals(Compute.DiskField.NAME, prefix + "\\d"); + Page<Disk> diskPage = compute.listDisks(ZONE, Compute.DiskListOption.filter(diskFilter)); + Iterator<Disk> diskIterator = diskPage.iterateAll(); + int count = 0; + while (diskIterator.hasNext()) { + Disk remoteDisk = diskIterator.next(); + assertEquals(ZONE, remoteDisk.diskId().zone()); + assertTrue(diskSet.contains(remoteDisk.diskId().disk())); + assertEquals(DiskInfo.CreationStatus.READY, remoteDisk.creationStatus()); + assertNotNull(remoteDisk.creationTimestamp()); + assertNotNull(remoteDisk.id()); + assertTrue(remoteDisk.configuration() instanceof StandardDiskConfiguration); + StandardDiskConfiguration remoteConfiguration = remoteDisk.configuration(); + assertEquals(100L, (long) remoteConfiguration.sizeGb()); + assertEquals("pd-ssd", remoteConfiguration.diskType().diskType()); + assertEquals(DiskConfiguration.Type.STANDARD, remoteConfiguration.type()); + assertNull(remoteDisk.lastAttachTimestamp()); + assertNull(remoteDisk.lastDetachTimestamp()); + count++; + } + assertEquals(2, count); + // test list disks with selected fields + count = 0; + diskPage = compute.listDisks(ZONE, Compute.DiskListOption.filter(diskFilter), + Compute.DiskListOption.fields(Compute.DiskField.STATUS)); + diskIterator = diskPage.iterateAll(); + while (diskIterator.hasNext()) { + Disk remoteDisk = diskIterator.next(); + assertEquals(ZONE, remoteDisk.diskId().zone()); + assertTrue(diskSet.contains(remoteDisk.diskId().disk())); + 
assertEquals(DiskInfo.CreationStatus.READY, remoteDisk.creationStatus()); + assertNull(remoteDisk.creationTimestamp()); + assertNull(remoteDisk.id()); + assertTrue(remoteDisk.configuration() instanceof StandardDiskConfiguration); + StandardDiskConfiguration remoteConfiguration = remoteDisk.configuration(); + assertNull(remoteConfiguration.sizeGb()); + assertEquals("pd-ssd", remoteConfiguration.diskType().diskType()); + assertEquals(DiskConfiguration.Type.STANDARD, remoteConfiguration.type()); + assertNull(remoteDisk.lastAttachTimestamp()); + assertNull(remoteDisk.lastDetachTimestamp()); + count++; + } + assertEquals(2, count); + // test snapshots + SnapshotId firstSnapshotId = SnapshotId.of(diskNames[0]); + SnapshotId secondSnapshotId = SnapshotId.of(diskNames[1]); + firstOperation = compute.create(SnapshotInfo.of(firstSnapshotId, firstDiskId)); + secondOperation = compute.create(SnapshotInfo.of(secondSnapshotId, secondDiskId)); + while (!firstOperation.isDone()) { + Thread.sleep(1000L); + } + while (!secondOperation.isDone()) { + Thread.sleep(1000L); + } + // test list snapshots + Compute.SnapshotFilter snapshotFilter = + Compute.SnapshotFilter.equals(Compute.SnapshotField.NAME, prefix + "\\d"); + Page<Snapshot> snapshotPage = + compute.listSnapshots(Compute.SnapshotListOption.filter(snapshotFilter)); + Iterator<Snapshot> snapshotIterator = snapshotPage.iterateAll(); + count = 0; + while (snapshotIterator.hasNext()) { + Snapshot remoteSnapshot = snapshotIterator.next(); + assertNotNull(remoteSnapshot.id()); + assertTrue(diskSet.contains(remoteSnapshot.snapshotId().snapshot())); + assertNotNull(remoteSnapshot.creationTimestamp()); + assertNotNull(remoteSnapshot.status()); + assertEquals(100L, (long) remoteSnapshot.diskSizeGb()); + assertTrue(diskSet.contains(remoteSnapshot.sourceDisk().disk())); + assertNotNull(remoteSnapshot.sourceDiskId()); + assertNotNull(remoteSnapshot.storageBytes()); + assertNotNull(remoteSnapshot.storageBytesStatus()); + count++; + } + assertEquals(2, count); + // test list snapshots with selected fields + snapshotPage = compute.listSnapshots(Compute.SnapshotListOption.filter(snapshotFilter), + Compute.SnapshotListOption.fields(Compute.SnapshotField.CREATION_TIMESTAMP)); + snapshotIterator = snapshotPage.iterateAll(); + count = 0; + while (snapshotIterator.hasNext()) { + Snapshot remoteSnapshot = snapshotIterator.next(); + assertNull(remoteSnapshot.id()); + assertTrue(diskSet.contains(remoteSnapshot.snapshotId().snapshot())); + assertNotNull(remoteSnapshot.creationTimestamp()); + assertNull(remoteSnapshot.status()); + assertNull(remoteSnapshot.diskSizeGb()); + assertNull(remoteSnapshot.sourceDisk()); + assertNull(remoteSnapshot.sourceDiskId()); + assertNull(remoteSnapshot.storageBytes()); + assertNull(remoteSnapshot.storageBytesStatus()); + count++; + } + assertEquals(2, count); + compute.delete(firstDiskId); + compute.delete(secondDiskId); + compute.deleteSnapshot(firstSnapshotId); + compute.deleteSnapshot(secondSnapshotId); + } + + @Test + public void testAggregatedListDisks() throws InterruptedException { + String prefix = BASE_RESOURCE_NAME + "list-aggregated-disk"; + String[] diskZones = {"us-central1-a", "us-east1-c"}; + String[] diskNames = {prefix + "1", prefix + "2"}; + DiskId firstDiskId = DiskId.of(diskZones[0], diskNames[0]); + DiskId secondDiskId = DiskId.of(diskZones[1], diskNames[1]); + DiskConfiguration configuration = + StandardDiskConfiguration.of(DiskTypeId.of(ZONE, "pd-ssd"), 100L); + Operation firstOperation = compute.create(DiskInfo.of(firstDiskId, 
configuration)); + Operation secondOperation = compute.create(DiskInfo.of(secondDiskId, configuration)); + while (!firstOperation.isDone()) { + Thread.sleep(1000L); + } + while (!secondOperation.isDone()) { + Thread.sleep(1000L); + } + Set<String> zoneSet = ImmutableSet.copyOf(diskZones); + Set<String> diskSet = ImmutableSet.copyOf(diskNames); + Compute.DiskFilter diskFilter = + Compute.DiskFilter.equals(Compute.DiskField.NAME, prefix + "\\d"); + Page<Disk> diskPage = compute.listDisks(Compute.DiskAggregatedListOption.filter(diskFilter)); + Iterator<Disk> diskIterator = diskPage.iterateAll(); + int count = 0; + while (diskIterator.hasNext()) { + Disk remoteDisk = diskIterator.next(); + assertTrue(zoneSet.contains(remoteDisk.diskId().zone())); + assertTrue(diskSet.contains(remoteDisk.diskId().disk())); + assertEquals(DiskInfo.CreationStatus.READY, remoteDisk.creationStatus()); + assertNotNull(remoteDisk.creationTimestamp()); + assertNotNull(remoteDisk.id()); + assertTrue(remoteDisk.configuration() instanceof StandardDiskConfiguration); + StandardDiskConfiguration remoteConfiguration = remoteDisk.configuration(); + assertEquals(100L, (long) remoteConfiguration.sizeGb()); + assertEquals("pd-ssd", remoteConfiguration.diskType().diskType()); + assertEquals(DiskConfiguration.Type.STANDARD, remoteConfiguration.type()); + count++; + } + assertEquals(2, count); + compute.delete(firstDiskId); + compute.delete(secondDiskId); + } + + @Test + public void testCreateGetAndDeprecateImage() throws InterruptedException { + String diskName = BASE_RESOURCE_NAME + "create-and-get-image-disk"; + String imageName = BASE_RESOURCE_NAME + "create-and-get-image"; + DiskId diskId = DiskId.of(ZONE, diskName); + ImageId imageId = ImageId.of(imageName); + DiskInfo diskInfo = + DiskInfo.of(diskId, StandardDiskConfiguration.of(DiskTypeId.of(ZONE, "pd-ssd"), 100L)); + Operation operation = compute.create(diskInfo); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + Disk remoteDisk = compute.get(diskId); + ImageInfo imageInfo = ImageInfo.of(imageId, DiskImageConfiguration.of(diskId)); + operation = compute.create(imageInfo); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + // test get image with selected fields + Image image = compute.get(imageId, + Compute.ImageOption.fields(Compute.ImageField.CREATION_TIMESTAMP)); + assertNull(image.id()); + assertNotNull(image.imageId()); + assertNotNull(image.creationTimestamp()); + assertNull(image.description()); + assertNotNull(image.configuration()); + assertTrue(image.configuration() instanceof DiskImageConfiguration); + DiskImageConfiguration remoteConfiguration = image.configuration(); + assertEquals(ImageConfiguration.Type.DISK, remoteConfiguration.type()); + assertEquals(diskName, remoteConfiguration.sourceDisk().disk()); + assertNull(image.status()); + assertNull(image.diskSizeGb()); + assertNull(image.licenses()); + assertNull(image.deprecationStatus()); + // test get image + image = compute.get(imageId); + assertNotNull(image.id()); + assertNotNull(image.imageId()); + assertNotNull(image.creationTimestamp()); + assertNotNull(image.configuration()); + assertTrue(image.configuration() instanceof DiskImageConfiguration); + remoteConfiguration = image.configuration(); + assertEquals(ImageConfiguration.Type.DISK, remoteConfiguration.type()); + assertEquals(diskName, remoteConfiguration.sourceDisk().disk()); + assertEquals(100L, (long) image.diskSizeGb()); + assertNotNull(image.status()); + assertNull(image.deprecationStatus()); + // test deprecate image + DeprecationStatus 
deprecationStatus = + DeprecationStatus.builder(DeprecationStatus.Status.DEPRECATED, imageId) + .deprecated(System.currentTimeMillis()) + .build(); + operation = image.deprecate(deprecationStatus); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + image = compute.get(imageId); + assertEquals(deprecationStatus, image.deprecationStatus()); + remoteDisk.delete(); + operation = image.delete(); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + assertNull(compute.get(imageId)); + } + + @Test + public void testListImages() { + Page<Image> imagePage = compute.listImages(IMAGE_PROJECT); + Iterator<Image> imageIterator = imagePage.iterateAll(); + int count = 0; + while (imageIterator.hasNext()) { + count++; + Image image = imageIterator.next(); + assertNotNull(image.id()); + assertNotNull(image.imageId()); + assertNotNull(image.creationTimestamp()); + assertNotNull(image.configuration()); + assertNotNull(image.status()); + assertNotNull(image.diskSizeGb()); + } + assertTrue(count > 0); + } + + @Test + public void testListImagesWithSelectedFields() { + Page<Image> imagePage = + compute.listImages(IMAGE_PROJECT, Compute.ImageListOption.fields(Compute.ImageField.ID)); + Iterator<Image> imageIterator = imagePage.iterateAll(); + int count = 0; + while (imageIterator.hasNext()) { + count++; + Image image = imageIterator.next(); + assertNotNull(image.id()); + assertNotNull(image.imageId()); + assertNull(image.creationTimestamp()); + assertNotNull(image.configuration()); + assertNull(image.status()); + assertNull(image.diskSizeGb()); + assertNull(image.licenses()); + assertNull(image.deprecationStatus()); + } + assertTrue(count > 0); + } + + @Test + public void testListImagesWithFilter() { + Page<Image> imagePage = compute.listImages(IMAGE_PROJECT, Compute.ImageListOption.filter( + Compute.ImageFilter.equals(Compute.ImageField.ARCHIVE_SIZE_BYTES, 365056004L))); + Iterator<Image> imageIterator = imagePage.iterateAll(); + int count = 0; + while (imageIterator.hasNext()) { + count++; + Image image = imageIterator.next(); + assertNotNull(image.id()); + assertNotNull(image.imageId()); + assertNotNull(image.creationTimestamp()); + assertNotNull(image.configuration()); + assertNotNull(image.status()); + assertNotNull(image.diskSizeGb()); + assertEquals(365056004L, + (long) image.<StorageImageConfiguration>configuration().archiveSizeBytes()); + } + assertTrue(count > 0); + } }
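
Note on the integration tests above: each one hand-rolls the same polling loop, while (!operation.isDone()) { Thread.sleep(1000L); }, to wait for zone and global operations to finish. A minimal sketch of a shared test helper is given below; the method name waitUntilDone and the timeout argument are illustrative assumptions, not part of this patch or of the gcloud-java API.

    // Hypothetical helper for ITComputeTest: polls an Operation until it completes
    // or the given timeout elapses, using the same 1-second interval as the tests above.
    private static void waitUntilDone(Operation operation, long timeoutMillis)
        throws InterruptedException {
      long deadline = System.currentTimeMillis() + timeoutMillis;
      while (!operation.isDone()) {
        if (System.currentTimeMillis() > deadline) {
          throw new IllegalStateException("operation did not complete within timeout");
        }
        Thread.sleep(1000L);
      }
    }

    // Example usage, replacing one of the inline loops:
    // waitUntilDone(compute.create(diskInfo), 300000L);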