
Wait for AVAILABLE status before returning from create #157

Merged: 3 commits merged from gb/wait-for-volume-available into master on Aug 7, 2018

Conversation


@garthy (Member) commented on Aug 7, 2018

No description provided.

@garthy requested a review from @tjfontaine on August 7, 2018 at 10:04
@prydie (Contributor) left a comment:

Few minor stylistic comments but otherwise LGTM 👍

metadata instancemeta.Interface,
volumeRoundingEnabled bool,
minVolumeSize resource.Quantity,
timeout time.Duration) plugin.ProvisionerPlugin {
@prydie (Contributor) suggested:

func NewBlockProvisioner(client client.ProvisionerClient,
	metadata instancemeta.Interface,
	volumeRoundingEnabled bool,
	minVolumeSize resource.Quantity,
	timeout time.Duration,
) plugin.ProvisionerPlugin {

return false, err
}

if getVolumeResponse.LifecycleState == core.VolumeLifecycleStateAvailable {
@prydie (Contributor) suggested:

switch state := getVolumeResponse.LifecycleState; state {
case core.VolumeLifecycleStateAvailable:
	return true, nil
case core.VolumeLifecycleStateFaulty, core.VolumeLifecycleStateTerminated, core.VolumeLifecycleStateTerminating:
	return false, fmt.Errorf("volume has lifecycle state %q", state)
}

return false, nil
}
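
For context, a minimal sketch (not the PR's actual code) of how the suggested switch could slot into the full readiness check. It assumes the volume is fetched with the OCI Go SDK's GetVolume call on the same BlockStorage() client used elsewhere in this PR; the isVolumeReady name matches the polling snippet in the next thread, and ctx and volumeID are assumed to be in scope:

isVolumeReady := func() (bool, error) {
	// Sketch only: fetch the volume's current state. GetVolume and
	// core.GetVolumeRequest are from the OCI Go SDK.
	getVolumeResponse, err := block.client.BlockStorage().GetVolume(ctx, core.GetVolumeRequest{
		VolumeId: volumeID,
	})
	if err != nil {
		return false, err
	}

	switch state := getVolumeResponse.LifecycleState; state {
	case core.VolumeLifecycleStateAvailable:
		// Volume is ready for use.
		return true, nil
	case core.VolumeLifecycleStateFaulty, core.VolumeLifecycleStateTerminated, core.VolumeLifecycleStateTerminating:
		// Terminal or failed states: stop polling and surface an error.
		return false, fmt.Errorf("volume has lifecycle state %q", state)
	}

	// Still provisioning (or another transient state): keep polling.
	return false, nil
}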

ready, err := isVolumeReady()
@prydie (Contributor) suggested:

return wait.PollImmediate(5*time.Second, timeout, func() (bool, error) {
	ready, err := isVolumeReady()
	if err != nil {
		return false, fmt.Errorf("failed to provision volume %q: %v", *volumeID, err)
	}
	return ready, nil
})

k8s.io/apimachinery/pkg/util/wait

@garthy (Member, Author) replied:

I was halfway through writing this and then thought K8s would have something...
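
For reference, a self-contained sketch of the polling helper built on k8s.io/apimachinery/pkg/util/wait, using the five-second interval and the condition from the suggestion above (the waitForVolumeAvailable name is hypothetical):

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForVolumeAvailable blocks until isVolumeReady reports true, the
// condition returns an error, or timeout elapses. PollImmediate runs the
// condition once immediately and then once per interval, returning
// wait.ErrWaitTimeout if the deadline passes first.
func waitForVolumeAvailable(volumeID *string, timeout time.Duration, isVolumeReady func() (bool, error)) error {
	return wait.PollImmediate(5*time.Second, timeout, func() (bool, error) {
		ready, err := isVolumeReady()
		if err != nil {
			return false, fmt.Errorf("failed to provision volume %q: %v", *volumeID, err)
		}
		return ready, nil
	})
}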

ctx, cancel := context.WithTimeout(block.client.Context(), block.client.Timeout())
defer cancel()

_, _ = block.client.BlockStorage().DeleteVolume(ctx, request)
@prydie (Contributor) suggested:

_, _ = block.client.BlockStorage().DeleteVolume(ctx, core.DeleteVolumeRequest{
	VolumeId: newVolume.Id,
})
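
Putting the pieces together, a hedged sketch of the error path this cleanup belongs to: if the volume never reaches AVAILABLE, the provisioner deletes it on a best-effort basis before returning the original error. Here newVolume is assumed to be a CreateVolume response, and waitForVolumeAvailable is the hypothetical helper sketched earlier:

if err := waitForVolumeAvailable(newVolume.Id, timeout, isVolumeReady); err != nil {
	// Best-effort cleanup so a half-provisioned volume is not leaked.
	ctx, cancel := context.WithTimeout(block.client.Context(), block.client.Timeout())
	defer cancel()

	// The DeleteVolume error is deliberately discarded; the provisioning
	// failure is the error worth surfacing to the caller.
	_, _ = block.client.BlockStorage().DeleteVolume(ctx, core.DeleteVolumeRequest{
		VolumeId: newVolume.Id,
	})
	return nil, err
}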

@@ -102,8 +108,9 @@ func (p *mockProvisionerClient) TenancyOCID() string {
 }

 // NewClientProvisioner creates an OCI client from the given configuration.
-func NewClientProvisioner(pcData client.ProvisionerClient) client.ProvisionerClient {
-	return &mockProvisionerClient{}
+func NewClientProvisioner(pcData client.ProvisionerClient,
@prydie (Contributor):

Either

func NewClientProvisioner(pcData client.ProvisionerClient, storage *mockBlockStorageClient) client.ProvisionerClient {

or

func NewClientProvisioner(
	pcData client.ProvisionerClient,
	storage *mockBlockStorageClient,
) client.ProvisionerClient {

@prydie prydie merged commit 4691383 into master Aug 7, 2018
@prydie prydie deleted the gb/wait-for-volume-available branch August 7, 2018 13:55
rjtsdl pushed a commit to rjtsdl/oci-volume-provisioner that referenced this pull request on Dec 20, 2018:
* Refactor LBSpec to derive state up-front
* Fix panic when no backends
* Refactor security list manager to use portSpec
* Remove old node port rule in Update()
* Remove old node port rule in securityListManger.Update() rather than