Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

New Resource: azurerm_key_vault_managed_hardware_security_module_key #25069

Closed
wants to merge 10 commits into from
Closed
7 changes: 4 additions & 3 deletions internal/provider/provider_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -91,9 +91,10 @@ func TestResourcesSupportCustomTimeouts(t *testing.T) {
} else if *resource.Timeouts.Read > 5*time.Minute {
exceptionResources := map[string]bool{
// The key vault item resources have longer read timeout for mitigating issue: https://github.com/hashicorp/terraform-provider-azurerm/issues/11059.
"azurerm_key_vault_key": true,
"azurerm_key_vault_secret": true,
"azurerm_key_vault_certificate": true,
"azurerm_key_vault_key": true,
"azurerm_key_vault_secret": true,
"azurerm_key_vault_certificate": true,
"azurerm_key_vault_managed_hardware_security_module_key": true,
}
if !exceptionResources[resourceName] {
t.Fatalf("Read timeouts shouldn't be more than 5 minutes, this indicates a bug which needs to be fixed")
Expand Down
5 changes: 5 additions & 0 deletions internal/services/managedhsm/client/client.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ type Client struct {

// Data Plane
DataPlaneClient *dataplane.BaseClient
DataPlaneManagedHSMClient *dataplane.BaseClient
DataPlaneRoleAssignmentsClient *dataplane.RoleAssignmentsClient
DataPlaneRoleDefinitionsClient *dataplane.RoleDefinitionsClient
DataPlaneSecurityDomainsClient *dataplane.HSMSecurityDomainClient
Expand All @@ -39,6 +40,9 @@ func NewClient(o *common.ClientOptions) (*Client, error) {
managementClient := dataplane.New()
o.ConfigureClient(&managementClient.Client, o.KeyVaultAuthorizer)

managementHSMClient := dataplane.New()
o.ConfigureClient(&managementHSMClient.Client, o.ManagedHSMAuthorizer)

securityDomainClient := dataplane.NewHSMSecurityDomainClient()
o.ConfigureClient(&securityDomainClient.Client, o.ManagedHSMAuthorizer)

Expand All @@ -54,6 +58,7 @@ func NewClient(o *common.ClientOptions) (*Client, error) {

// Data Plane
DataPlaneClient: &managementClient,
DataPlaneManagedHSMClient: &managementHSMClient,
DataPlaneSecurityDomainsClient: &securityDomainClient,
DataPlaneRoleDefinitionsClient: &roleDefinitionsClient,
DataPlaneRoleAssignmentsClient: &roleAssignmentsClient,
Expand Down
177 changes: 177 additions & 0 deletions internal/services/managedhsm/client/helpers.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,177 @@
// Copyright (c) HashiCorp, Inc.
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I implemented new cache logic that differs from keyvault's helper functions. It may look different, but it should work properly. The locks in the keyvault helpers are complex and may not actually function as expected.

// SPDX-License-Identifier: MPL-2.0
package client

import (
"context"
"fmt"
"net/url"
"strings"
"sync"

"github.com/hashicorp/go-azure-helpers/lang/response"
"github.com/hashicorp/go-azure-helpers/resourcemanager/commonids"
"github.com/hashicorp/go-azure-sdk/resource-manager/keyvault/2023-07-01/managedhsms"
)

// cacheItem associates a parsed managed HSM resource-manager ID with the
// data-plane base URI it resolves to.
type cacheItem struct {
	ID      managedhsms.ManagedHSMId
	BaseURI string
}

// localCache is a mutex-guarded map from (lower-cased) managed HSM name to
// its cacheItem, used to avoid repeated resource-manager lookups.
type localCache struct {
	// A concrete sync.Mutex (usable at its zero value) is preferred over
	// storing a sync.Locker interface, which forces a separate allocation.
	mux        sync.Mutex
	nameToItem map[string]cacheItem
}

// defaultCache is the package-level cache shared by the helpers in this file.
var defaultCache = &localCache{
	nameToItem: map[string]cacheItem{},
}

Comment on lines +22 to +31
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

why are there two caches??

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I just defined a struct and an instance of that struct to manage the cache, instead of the global map and lock variables used in the keyvault helpers. I think this way is more readable and makes the lock operations easier to manage.

// cacheKey normalises a managed HSM name into a case-insensitive cache key.
func cacheKey(name string) string {
	normalised := strings.ToLower(name)
	return normalised
}
// AddToCache stores the mapping between the given managed HSM resource ID and
// its data-plane base URI in the package-level cache.
func AddToCache(id managedhsms.ManagedHSMId, baseURI string) {
	defaultCache.add(id, baseURI)
}

// add records the mapping from the managed HSM's (case-insensitive) name to
// its resource ID and data-plane base URI, overwriting any existing entry.
func (l *localCache) add(id managedhsms.ManagedHSMId, baseURI string) {
	entry := cacheItem{
		ID:      id,
		BaseURI: baseURI,
	}

	l.mux.Lock()
	l.nameToItem[cacheKey(id.ManagedHSMName)] = entry
	l.mux.Unlock()
}

// get looks up the cache entry for the given managed HSM name, reporting
// whether an entry was present.
func (l *localCache) get(name string) (cacheItem, bool) {
	key := cacheKey(name)

	l.mux.Lock()
	item, found := l.nameToItem[key]
	l.mux.Unlock()

	return item, found
}

// RemoveFromCache drops the cached entry for the given managed HSM name from
// the package-level cache, if present.
func RemoveFromCache(name string) {
	defaultCache.remove(name)
}
// remove deletes the cache entry for the given managed HSM name; deleting a
// missing key is a no-op.
func (l *localCache) remove(name string) {
	key := cacheKey(name)

	l.mux.Lock()
	delete(l.nameToItem, key)
	l.mux.Unlock()
}

// BaseUriForManagedHSM returns the data-plane base URI for the given managed
// HSM, consulting the package-level cache first and falling back to a
// resource-manager Get (which also populates the cache).
func (c *Client) BaseUriForManagedHSM(ctx context.Context, id managedhsms.ManagedHSMId) (*string, error) {
	if item, ok := defaultCache.get(id.ManagedHSMName); ok {
		return &item.BaseURI, nil
	}

	resp, err := c.ManagedHsmClient.Get(ctx, id)
	if err != nil {
		if response.WasNotFound(resp.HttpResponse) {
			return nil, fmt.Errorf("managedHSM %s was not found", id)
		}
		return nil, fmt.Errorf("retrieving managedHSM %s: %+v", id, err)
	}

	hsmUri := ""
	if model := resp.Model; model != nil {
		if model.Properties.HsmUri != nil {
			hsmUri = *model.Properties.HsmUri
		}
	}
	if hsmUri == "" {
		// the field read above is `HsmUri` - the previous message wrongly
		// referenced `properties.VaultUri` (copied from the Key Vault helpers)
		return nil, fmt.Errorf("retrieving %s: `properties.HsmUri` was nil", id)
	}

	defaultCache.add(id, hsmUri)
	return &hsmUri, nil
}

// ManagedHSMIDFromBaseUri resolves the resource-manager ID for the managed
// HSM behind the given data-plane base URI. On a cache miss it lists every
// managed HSM in the subscription to repopulate the cache before retrying.
func (c *Client) ManagedHSMIDFromBaseUri(ctx context.Context, subscriptionId commonids.SubscriptionId, uri string) (*managedhsms.ManagedHSMId, error) {
	name, err := parseNameFromBaseUrl(uri)
	if err != nil {
		return nil, err
	}

	if item, ok := defaultCache.get(*name); ok {
		return &item.ID, nil
	}

	// Cache miss: fetch all managed HSMs in the subscription and cache each one.
	opts := managedhsms.DefaultListBySubscriptionOperationOptions()
	results, err := c.ManagedHsmClient.ListBySubscriptionComplete(ctx, subscriptionId, opts)
	if err != nil {
		return nil, fmt.Errorf("listing the managed HSM within %s: %+v", subscriptionId, err)
	}
	// use a distinct name to avoid shadowing the cache lookup's `item` above
	for _, hsm := range results.Items {
		if hsm.Id == nil || hsm.Properties.HsmUri == nil {
			continue
		}

		// Populate the managed HSM into the cache
		managedHSMID, err := managedhsms.ParseManagedHSMIDInsensitively(*hsm.Id)
		if err != nil {
			return nil, fmt.Errorf("parsing %q as a managed HSM ID: %+v", *hsm.Id, err)
		}
		defaultCache.add(*managedHSMID, *hsm.Properties.HsmUri)
	}

	// Now that the cache has been repopulated, check if we have the managed HSM or not
	if v, ok := defaultCache.get(*name); ok {
		return &v.ID, nil
	}

	// previously returned "not implemented", which mis-described a plain miss
	return nil, fmt.Errorf("managed HSM %q was not found in subscription %s", *name, subscriptionId)
}

// ManagedHSMExists reports whether the given managed HSM exists, consulting
// the cache first and otherwise issuing a resource-manager Get (which warms
// the cache on success). A 404 from the API is reported as (false, nil).
func (c *Client) ManagedHSMExists(ctx context.Context, id managedhsms.ManagedHSMId) (bool, error) {
	if _, ok := defaultCache.get(id.ManagedHSMName); ok {
		return true, nil
	}

	resp, err := c.ManagedHsmClient.Get(ctx, id)
	if err != nil {
		if response.WasNotFound(resp.HttpResponse) {
			return false, nil
		}
		return false, fmt.Errorf("retrieving managedHSM %s: %+v", id, err)
	}

	hsmUri := ""
	if model := resp.Model; model != nil {
		if model.Properties.HsmUri != nil {
			hsmUri = *model.Properties.HsmUri
		}
	}
	if hsmUri == "" {
		// the field read above is `HsmUri` - the previous message wrongly
		// referenced `properties.VaultUri` (copied from the Key Vault helpers)
		return false, fmt.Errorf("retrieving %s: `properties.HsmUri` was nil", id)
	}

	defaultCache.add(id, hsmUri)
	return true, nil
}

// parseNameFromBaseUrl extracts the managed HSM name from a data-plane base
// URL of the form `https://{name}.managedhsm.{dns-suffix}`, for example:
//
//	https://the-hsm.managedhsm.azure.net
//	https://the-hsm.managedhsm.microsoftazure.de
//	https://the-hsm.managedhsm.usgovcloudapi.net
//	https://the-hsm.managedhsm.cloudapi.microsoft
//	https://the-hsm.managedhsm.azure.cn
func parseNameFromBaseUrl(input string) (*string, error) {
	parsed, err := url.Parse(input)
	if err != nil {
		return nil, err
	}

	// the first host label is the name; the second must be `managedhsm`, and
	// at least one further label (the DNS suffix) must follow it
	name, remainder, found := strings.Cut(parsed.Host, ".")
	if !found || !strings.HasPrefix(remainder, "managedhsm.") {
		return nil, fmt.Errorf("expected a URI in the format `the-managedhsm-name.managedhsm.**` but got %q", parsed.Host)
	}
	return &name, nil
}
49 changes: 49 additions & 0 deletions internal/services/managedhsm/custompollers/recover_key_poller.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package custompollers

import (
"context"
"fmt"
"log"
"net/http"
"time"

"github.com/hashicorp/go-azure-helpers/lang/response"
"github.com/hashicorp/go-azure-sdk/sdk/client/pollers"
)

// Compile-time assertion that recoverKeyPoller satisfies pollers.PollerType.
var _ pollers.PollerType = &recoverKeyPoller{}

// NewRecoverKeyPoller returns a poller which checks the given data-plane key
// URI for availability.
func NewRecoverKeyPoller(uri string) pollers.PollerType {
	poller := recoverKeyPoller{uri: uri}
	return &poller
}

// recoverKeyPoller polls a managed HSM key's data-plane URI; see Poll for the
// polling behaviour.
type recoverKeyPoller struct {
	uri string // data-plane URI of the key being checked
}

// Poll checks whether the key is reachable at the data-plane URI. After a
// key is recovered it can take time before the data plane returns it, so a
// 404 keeps the poller in progress; any other response completes it.
//
// NOTE(review): the previous implementation returned Succeeded from BOTH
// branches, which made the poller a no-op - the not-found branch must remain
// in progress for polling to mean anything.
func (p *recoverKeyPoller) Poll(ctx context.Context) (*pollers.PollResult, error) {
	res := &pollers.PollResult{
		PollInterval: time.Second * 20,
		Status:       pollers.PollingStatusInProgress,
	}

	// build a context-aware request so cancellation/timeouts are honoured
	// (http.Get ignores ctx entirely)
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, p.uri, nil)
	if err != nil {
		return nil, fmt.Errorf("building request for %q: %+v", p.uri, err)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Printf("[DEBUG] Unable to check the managed HSM key at %q", p.uri)
		return res, fmt.Errorf("checking key at %q: %s", p.uri, err)
	}

	defer resp.Body.Close()
	if response.WasNotFound(resp) {
		// not visible on the data plane yet - keep polling
		return res, nil
	}

	res.Status = pollers.PollingStatusSucceeded
	return res, nil
}
Loading
Loading