From f9e8e54835d9afc06f96fff07085b8f75b0de718 Mon Sep 17 00:00:00 2001
From: Peter McAtominey
Date: Thu, 30 Mar 2017 15:33:54 +0100
Subject: [PATCH] backend: convert Azure remote state to a backend

Added locking support via blob leasing (requires that an empty state is
created before any lock can be acquired).

Added support for "environments" in much the same way as the S3 backend.
---
 backend/init/init.go                        |   2 +
 backend/remote-state/azure/backend.go       | 202 +++++++++++++++++
 backend/remote-state/azure/backend_state.go | 137 +++++++++++
 backend/remote-state/azure/backend_test.go  | 226 +++++++++++++++++++
 backend/remote-state/azure/client.go        | 238 ++++++++++++++++++++
 backend/remote-state/azure/client_test.go   |  69 ++++++
 state/remote/remote.go                      |   1 -
 website/docs/backends/types/azure.html.md   |  14 +-
 8 files changed, 884 insertions(+), 5 deletions(-)
 create mode 100644 backend/remote-state/azure/backend.go
 create mode 100644 backend/remote-state/azure/backend_state.go
 create mode 100644 backend/remote-state/azure/backend_test.go
 create mode 100644 backend/remote-state/azure/client.go
 create mode 100644 backend/remote-state/azure/client_test.go

diff --git a/backend/init/init.go b/backend/init/init.go
index eb74ebeb5..3db130a49 100644
--- a/backend/init/init.go
+++ b/backend/init/init.go
@@ -10,6 +10,7 @@ import (
 	backendatlas "github.com/hashicorp/terraform/backend/atlas"
 	backendlegacy "github.com/hashicorp/terraform/backend/legacy"
 	backendlocal "github.com/hashicorp/terraform/backend/local"
+	backendAzure "github.com/hashicorp/terraform/backend/remote-state/azure"
 	backendconsul "github.com/hashicorp/terraform/backend/remote-state/consul"
 	backendinmem "github.com/hashicorp/terraform/backend/remote-state/inmem"
 	backendS3 "github.com/hashicorp/terraform/backend/remote-state/s3"
@@ -40,6 +41,7 @@ func init() {
 		"inmem": func() backend.Backend { return backendinmem.New() },
 		"swift": func() backend.Backend { return backendSwift.New() },
 		"s3":    func() backend.Backend { return backendS3.New() },
+		"azure": func() backend.Backend { return backendAzure.New() },
 	}

 	// Add the legacy remote backends that haven't yet been convertd to
diff --git a/backend/remote-state/azure/backend.go b/backend/remote-state/azure/backend.go
new file mode 100644
index 000000000..4431d4f5f
--- /dev/null
+++ b/backend/remote-state/azure/backend.go
@@ -0,0 +1,202 @@
+package azure
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/Azure/azure-sdk-for-go/arm/storage"
+	"github.com/Azure/go-autorest/autorest/azure"
+	"github.com/hashicorp/terraform/backend"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+// New creates a new backend for Azure remote state.
+func New() backend.Backend { + s := &schema.Backend{ + Schema: map[string]*schema.Schema{ + "storage_account_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "The name of the storage account.", + }, + + "container_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "The container name.", + }, + + "key": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "The blob key.", + }, + + "environment": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The Azure cloud environment.", + Default: "", + }, + + "access_key": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The access key.", + DefaultFunc: schema.EnvDefaultFunc("ARM_ACCESS_KEY", ""), + }, + + "resource_group_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The resource group name.", + }, + + "arm_subscription_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The Subscription ID.", + DefaultFunc: schema.EnvDefaultFunc("ARM_SUBSCRIPTION_ID", ""), + }, + + "arm_client_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The Client ID.", + DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_ID", ""), + }, + + "arm_client_secret": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The Client Secret.", + DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_SECRET", ""), + }, + + "arm_tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The Tenant ID.", + DefaultFunc: schema.EnvDefaultFunc("ARM_TENANT_ID", ""), + }, + }, + } + + result := &Backend{Backend: s} + result.Backend.ConfigureFunc = result.configure + return result +} + +type Backend struct { + *schema.Backend + + // The fields below are set from configure + blobClient storage.BlobStorageClient + + containerName string + keyName string + leaseID string +} + +func (b *Backend) configure(ctx context.Context) error { + if b.containerName != "" { + return nil + } + + // Grab the resource data + data := schema.FromContextBackendConfig(ctx) + + b.containerName = data.Get("container_name").(string) + b.keyName = data.Get("key").(string) + + blobClient, err := getBlobClient(data) + if err != nil { + return err + } + b.blobClient = blobClient + + return nil +} + +func getBlobClient(d *schema.ResourceData) (storage.BlobStorageClient, error) { + var client storage.BlobStorageClient + + env, err := getAzureEnvironment(d.Get("environment").(string)) + if err != nil { + return client, err + } + + storageAccountName := d.Get("storage_account_name").(string) + + accessKey, err := getAccessKey(d, storageAccountName, env) + if err != nil { + return client, err + } + + storageClient, err := storage.NewClient(storageAccountName, accessKey, env.StorageEndpointSuffix, + storage.DefaultAPIVersion, true) + if err != nil { + return client, fmt.Errorf("Error creating storage client for storage account %q: %s", storageAccountName, err) + } + + client = storageClient.GetBlobService() + return client, nil +} + +func getAccessKey(d *schema.ResourceData, storageAccountName string, env azure.Environment) (string, error) { + if key, ok := d.GetOk("access_key"); ok { + return key.(string), nil + } + + resourceGroupName, rgOk := d.GetOk("resource_group_name") + subscriptionID, subOk := d.GetOk("arm_subscription_id") + clientID, clientIDOk := d.GetOk("arm_client_id") + clientSecret, clientSecretOK := d.GetOk("arm_client_secret") + 
tenantID, tenantIDOk := d.GetOk("arm_tenant_id") + if !rgOk || !subOk || !clientIDOk || !clientSecretOK || !tenantIDOk { + return "", fmt.Errorf("resource_group_name and credentials must be provided when access_key is absent") + } + + oauthConfig, err := env.OAuthConfigForTenant(tenantID.(string)) + if err != nil { + return "", err + } + + spt, err := azure.NewServicePrincipalToken(*oauthConfig, clientID.(string), clientSecret.(string), env.ResourceManagerEndpoint) + if err != nil { + return "", err + } + + accountsClient := storage.NewAccountsClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID.(string)) + accountsClient.Authorizer = spt + + keys, err := accountsClient.ListKeys(resourceGroupName.(string), storageAccountName) + if err != nil { + return "", fmt.Errorf("Error retrieving keys for storage account %q: %s", storageAccountName, err) + } + + if keys.Keys == nil { + return "", fmt.Errorf("Nil key returned for storage account %q", storageAccountName) + } + + accessKeys := *keys.Keys + return *accessKeys[0].Value, nil +} + +func getAzureEnvironment(environment string) (azure.Environment, error) { + if environment == "" { + return azure.PublicCloud, nil + } + + env, err := azure.EnvironmentFromName(environment) + if err != nil { + // try again with wrapped value to support readable values like german instead of AZUREGERMANCLOUD + var innerErr error + env, innerErr = azure.EnvironmentFromName(fmt.Sprintf("AZURE%sCLOUD", environment)) + if innerErr != nil { + return env, fmt.Errorf("invalid 'environment' configuration: %s", err) + } + } + + return env, nil +} diff --git a/backend/remote-state/azure/backend_state.go b/backend/remote-state/azure/backend_state.go new file mode 100644 index 000000000..c6c86f309 --- /dev/null +++ b/backend/remote-state/azure/backend_state.go @@ -0,0 +1,137 @@ +package azure + +import ( + "fmt" + "sort" + "strings" + + "github.com/Azure/azure-sdk-for-go/arm/storage" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/state/remote" + "github.com/hashicorp/terraform/terraform" +) + +const ( + // This will be used as directory name, the odd looking colon is simply to + // reduce the chance of name conflicts with existing objects. 
+ keyEnvPrefix = "env:" +) + +func (b *Backend) States() ([]string, error) { + prefix := b.keyName + keyEnvPrefix + params := storage.ListBlobsParameters{ + Prefix: prefix, + } + + container := b.blobClient.GetContainerReference(b.containerName) + resp, err := container.ListBlobs(params) + if err != nil { + return nil, err + } + + envs := map[string]struct{}{} + for _, obj := range resp.Blobs { + key := obj.Name + if strings.HasPrefix(key, prefix) { + name := strings.TrimPrefix(key, prefix) + // we store the state in a key, not a directory + if strings.Contains(name, "/") { + continue + } + + envs[name] = struct{}{} + } + } + + result := []string{backend.DefaultStateName} + for name := range envs { + result = append(result, name) + } + sort.Strings(result[1:]) + return result, nil +} + +func (b *Backend) DeleteState(name string) error { + if name == backend.DefaultStateName || name == "" { + return fmt.Errorf("can't delete default state") + } + + return b.blobClient.DeleteBlob(b.containerName, b.path(name), nil) +} + +func (b *Backend) State(name string) (state.State, error) { + client := &RemoteClient{ + blobClient: b.blobClient, + containerName: b.containerName, + keyName: b.path(name), + } + + stateMgr := &remote.State{Client: client} + + //if this isn't the default state name, we need to create the object so + //it's listed by States. + if name != backend.DefaultStateName { + // take a lock on this state while we write it + lockInfo := state.NewLockInfo() + lockInfo.Operation = "init" + lockId, err := client.Lock(lockInfo) + if err != nil { + return nil, fmt.Errorf("failed to lock azure state: %s", err) + } + + // Local helper function so we can call it multiple places + lockUnlock := func(parent error) error { + if err := stateMgr.Unlock(lockId); err != nil { + return fmt.Errorf(strings.TrimSpace(errStateUnlock), lockId, err) + } + return parent + } + + // Grab the value + if err := stateMgr.RefreshState(); err != nil { + err = lockUnlock(err) + return nil, err + } + + // If we have no state, we have to create an empty state + if v := stateMgr.State(); v == nil { + if err := stateMgr.WriteState(terraform.NewState()); err != nil { + err = lockUnlock(err) + return nil, err + } + if err := stateMgr.PersistState(); err != nil { + err = lockUnlock(err) + return nil, err + } + } + + // Unlock, the state should now be initialized + if err := lockUnlock(nil); err != nil { + return nil, err + } + + } + + return stateMgr, nil +} + +func (b *Backend) client() *RemoteClient { + return &RemoteClient{} +} + +func (b *Backend) path(name string) string { + if name == backend.DefaultStateName { + return b.keyName + } + + return b.keyName + keyEnvPrefix + name +} + +const errStateUnlock = ` +Error unlocking Azure state. Lock ID: %s + +Error: %s + +You may have to force-unlock this state in order to use it again. 
+`
diff --git a/backend/remote-state/azure/backend_test.go b/backend/remote-state/azure/backend_test.go
new file mode 100644
index 000000000..6164c75aa
--- /dev/null
+++ b/backend/remote-state/azure/backend_test.go
@@ -0,0 +1,226 @@
+package azure
+
+import (
+	"fmt"
+	"os"
+	"testing"
+
+	"github.com/Azure/azure-sdk-for-go/arm/resources/resources"
+	armStorage "github.com/Azure/azure-sdk-for-go/arm/storage"
+	"github.com/Azure/azure-storage-go"
+	"github.com/Azure/go-autorest/autorest/azure"
+	"github.com/hashicorp/terraform/backend"
+	"github.com/hashicorp/terraform/helper/acctest"
+)
+
+// verify that we are doing ACC tests or the Azure tests specifically
+func testACC(t *testing.T) {
+	skip := os.Getenv("TF_ACC") == "" && os.Getenv("TF_AZURE_TEST") == ""
+	if skip {
+		t.Log("azure backend tests require setting TF_ACC or TF_AZURE_TEST")
+		t.Skip()
+	}
+}
+
+func TestBackend_impl(t *testing.T) {
+	var _ backend.Backend = new(Backend)
+}
+
+func TestBackendConfig(t *testing.T) {
+	// This test just instantiates the client. Shouldn't make any actual
+	// requests nor incur any costs.
+
+	config := map[string]interface{}{
+		"storage_account_name": "tfaccount",
+		"container_name":       "tfcontainer",
+		"key":                  "state",
+		// Access Key must be Base64
+		"access_key": "QUNDRVNTX0tFWQ0K",
+	}
+
+	b := backend.TestBackendConfig(t, New(), config).(*Backend)
+
+	if b.containerName != "tfcontainer" {
+		t.Fatalf("Incorrect containerName was populated")
+	}
+	if b.keyName != "state" {
+		t.Fatalf("Incorrect keyName was populated")
+	}
+}
+
+func TestBackend(t *testing.T) {
+	testACC(t)
+
+	keyName := "testState"
+	res := setupResources(t, keyName)
+	defer destroyResources(t, res.resourceGroupName)
+
+	b := backend.TestBackendConfig(t, New(), map[string]interface{}{
+		"storage_account_name": res.storageAccountName,
+		"container_name":       res.containerName,
+		"key":                  keyName,
+		"access_key":           res.accessKey,
+	}).(*Backend)
+
+	backend.TestBackend(t, b, nil)
+}
+
+func TestBackendLocked(t *testing.T) {
+	testACC(t)
+
+	keyName := "testState"
+	res := setupResources(t, keyName)
+	defer destroyResources(t, res.resourceGroupName)
+
+	b1 := backend.TestBackendConfig(t, New(), map[string]interface{}{
+		"storage_account_name": res.storageAccountName,
+		"container_name":       res.containerName,
+		"key":                  keyName,
+		"access_key":           res.accessKey,
+	}).(*Backend)
+
+	b2 := backend.TestBackendConfig(t, New(), map[string]interface{}{
+		"storage_account_name": res.storageAccountName,
+		"container_name":       res.containerName,
+		"key":                  keyName,
+		"access_key":           res.accessKey,
+	}).(*Backend)
+
+	backend.TestBackend(t, b1, b2)
+}
+
+type testResources struct {
+	resourceGroupName  string
+	storageAccountName string
+	containerName      string
+	keyName            string
+	accessKey          string
+}
+
+func setupResources(t *testing.T, keyName string) testResources {
+	clients := getTestClient(t)
+
+	ri := acctest.RandInt()
+	rs := acctest.RandString(4)
+	res := testResources{
+		resourceGroupName:  fmt.Sprintf("terraform-backend-testing-%d", ri),
+		storageAccountName: fmt.Sprintf("tfbackendtesting%s", rs),
+		containerName:      "terraform",
+		keyName:            keyName,
+	}
+
+	location := os.Getenv("ARM_LOCATION")
+	if location == "" {
+		location = "westus"
+	}
+
+	t.Logf("creating resource group %s", res.resourceGroupName)
+	_, err := clients.groupsClient.CreateOrUpdate(res.resourceGroupName, resources.Group{Location: &location})
+	if err != nil {
+		t.Fatalf("failed to create test resource group: %s", err)
+	}
+
+	t.Logf("creating storage account %s", res.storageAccountName)
+	_, err = clients.storageAccountsClient.Create(res.resourceGroupName, res.storageAccountName, armStorage.AccountCreateParameters{
+		Sku: &armStorage.Sku{
+			Name: armStorage.StandardLRS,
+			Tier: armStorage.Standard,
+		},
+		Location: &location,
+	}, make(chan struct{}))
+	if err != nil {
+		destroyResources(t, res.resourceGroupName)
+		t.Fatalf("failed to create test storage account: %s", err)
+	}
+
+	t.Log("fetching access key for storage account")
+	resp, err := clients.storageAccountsClient.ListKeys(res.resourceGroupName, res.storageAccountName)
+	if err != nil {
+		destroyResources(t, res.resourceGroupName)
+		t.Fatalf("failed to list storage account keys: %s", err)
+	}
+
+	keys := *resp.Keys
+	res.accessKey = *keys[0].Value
+
+	storageClient, err := storage.NewClient(res.storageAccountName, res.accessKey,
+		clients.environment.StorageEndpointSuffix, storage.DefaultAPIVersion, true)
+	if err != nil {
+		destroyResources(t, res.resourceGroupName)
+		t.Fatalf("failed to create test storage client: %s", err)
+	}
+
+	t.Logf("creating container %s", res.containerName)
+	container := storageClient.GetBlobService().GetContainerReference(res.containerName)
+	err = container.Create()
+	if err != nil {
+		destroyResources(t, res.resourceGroupName)
+		t.Fatalf("failed to create storage container: %s", err)
+	}
+
+	return res
+}
+
+func destroyResources(t *testing.T, resourceGroupName string) {
+	warning := "WARNING: Failed to delete the test Azure resources. They may incur charges. (error was %s)"
+
+	clients := getTestClient(t)
+
+	t.Log("destroying created resources")
+
+	// destroying is simple: deleting the resource group destroys everything else in it
+	_, err := clients.groupsClient.Delete(resourceGroupName, make(chan struct{}))
+	if err != nil {
+		t.Logf(warning, err)
+		return
+	}
+
+	t.Log("Azure resources destroyed")
+}
+
+type testClient struct {
+	subscriptionID        string
+	tenantID              string
+	clientID              string
+	clientSecret          string
+	environment           azure.Environment
+	groupsClient          resources.GroupsClient
+	storageAccountsClient armStorage.AccountsClient
+}
+
+func getTestClient(t *testing.T) testClient {
+	client := testClient{
+		subscriptionID: os.Getenv("ARM_SUBSCRIPTION_ID"),
+		tenantID:       os.Getenv("ARM_TENANT_ID"),
+		clientID:       os.Getenv("ARM_CLIENT_ID"),
+		clientSecret:   os.Getenv("ARM_CLIENT_SECRET"),
+	}
+
+	if client.subscriptionID == "" || client.tenantID == "" || client.clientID == "" || client.clientSecret == "" {
+		t.Fatal("Azure credentials missing or incomplete")
+	}
+
+	env, err := getAzureEnvironment(os.Getenv("ARM_ENVIRONMENT"))
+	if err != nil {
+		t.Fatalf("Failed to detect Azure environment from ARM_ENVIRONMENT value: %s", os.Getenv("ARM_ENVIRONMENT"))
+	}
+	client.environment = env
+
+	oauthConfig, err := env.OAuthConfigForTenant(client.tenantID)
+	if err != nil {
+		t.Fatalf("Failed to get OAuth config: %s", err)
+	}
+
+	spt, err := azure.NewServicePrincipalToken(*oauthConfig, client.clientID, client.clientSecret, env.ResourceManagerEndpoint)
+	if err != nil {
+		t.Fatalf("Failed to create Service Principal Token: %s", err)
+	}
+
+	client.groupsClient = resources.NewGroupsClientWithBaseURI(env.ResourceManagerEndpoint, client.subscriptionID)
+	client.groupsClient.Authorizer = spt
+
+	client.storageAccountsClient = armStorage.NewAccountsClientWithBaseURI(env.ResourceManagerEndpoint, client.subscriptionID)
+	client.storageAccountsClient.Authorizer = spt
+
+	return client
+}
diff --git a/backend/remote-state/azure/client.go b/backend/remote-state/azure/client.go
new file mode 100644
index
000000000..bc503579f --- /dev/null +++ b/backend/remote-state/azure/client.go @@ -0,0 +1,238 @@ +package azure + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "log" + + "encoding/base64" + "github.com/Azure/azure-sdk-for-go/arm/storage" + multierror "github.com/hashicorp/go-multierror" + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/state/remote" + "github.com/hashicorp/terraform/terraform" +) + +const ( + leaseHeader = "x-ms-lease-id" + // Must be lower case + lockInfoMetaKey = "terraformlockid" +) + +type RemoteClient struct { + blobClient storage.BlobStorageClient + containerName string + keyName string + leaseID string +} + +func (c *RemoteClient) Get() (*remote.Payload, error) { + blob, err := c.blobClient.GetBlob(c.containerName, c.keyName) + if err != nil { + if storErr, ok := err.(storage.AzureStorageServiceError); ok { + if storErr.Code == "BlobNotFound" { + return nil, nil + } + } + return nil, err + } + + defer blob.Close() + + buf := bytes.NewBuffer(nil) + if _, err := io.Copy(buf, blob); err != nil { + return nil, fmt.Errorf("Failed to read remote state: %s", err) + } + + payload := &remote.Payload{ + Data: buf.Bytes(), + } + + // If there was no data, then return nil + if len(payload.Data) == 0 { + return nil, nil + } + + return payload, nil +} + +func (c *RemoteClient) Put(data []byte) error { + headers := map[string]string{ + "Content-Type": "application/json", + } + + if c.leaseID != "" { + headers[leaseHeader] = c.leaseID + } + + log.Print("[DEBUG] Uploading remote state to Azure") + + err := c.blobClient.CreateBlockBlobFromReader( + c.containerName, + c.keyName, + uint64(len(data)), + bytes.NewReader(data), + headers, + ) + + if err != nil { + return fmt.Errorf("Failed to upload state: %v", err) + } + + return nil +} + +func (c *RemoteClient) Delete() error { + headers := map[string]string{} + if c.leaseID != "" { + headers[leaseHeader] = c.leaseID + } + + return c.blobClient.DeleteBlob(c.containerName, c.keyName, headers) +} + +func (c *RemoteClient) Lock(info *state.LockInfo) (string, error) { + stateName := fmt.Sprintf("%s/%s", c.containerName, c.keyName) + info.Path = stateName + + if info.ID == "" { + lockID, err := uuid.GenerateUUID() + if err != nil { + return "", err + } + + info.ID = lockID + } + + getLockInfoErr := func(err error) error { + lockInfo, infoErr := c.getLockInfo() + if infoErr != nil { + err = multierror.Append(err, infoErr) + } + + return &state.LockError{ + Err: err, + Info: lockInfo, + } + } + + leaseID, err := c.blobClient.AcquireLease(c.containerName, c.keyName, -1, info.ID) + if err != nil { + if storErr, ok := err.(storage.AzureStorageServiceError); ok && storErr.Code != "BlobNotFound" { + return "", getLockInfoErr(err) + } + + // failed to lock as there was no state blob, write empty state + stateMgr := &remote.State{Client: c} + + // ensure state is actually empty + if err := stateMgr.RefreshState(); err != nil { + return "", fmt.Errorf("Failed to refresh state before writing empty state for locking: %s", err) + } + + log.Print("[DEBUG] Could not lock as state blob did not exist, creating with empty state") + + if v := stateMgr.State(); v == nil { + if err := stateMgr.WriteState(terraform.NewState()); err != nil { + return "", fmt.Errorf("Failed to write empty state for locking: %s", err) + } + if err := stateMgr.PersistState(); err != nil { + return "", fmt.Errorf("Failed to persist empty state for locking: %s", err) + } + } + + leaseID, err = 
c.blobClient.AcquireLease(c.containerName, c.keyName, -1, info.ID) + if err != nil { + return "", getLockInfoErr(err) + } + } + + info.ID = leaseID + c.leaseID = leaseID + + if err := c.writeLockInfo(info); err != nil { + return "", err + } + + return info.ID, nil +} + +func (c *RemoteClient) getLockInfo() (*state.LockInfo, error) { + meta, err := c.blobClient.GetBlobMetadata(c.containerName, c.keyName) + if err != nil { + return nil, err + } + + raw := meta[lockInfoMetaKey] + if raw == "" { + return nil, fmt.Errorf("blob metadata %s was empty", lockInfoMetaKey) + } + + data, err := base64.StdEncoding.DecodeString(raw) + if err != nil { + return nil, err + } + + lockInfo := &state.LockInfo{} + err = json.Unmarshal(data, lockInfo) + if err != nil { + return nil, err + } + + return lockInfo, nil +} + +// writes info to blob meta data, deletes metadata entry if info is nil +func (c *RemoteClient) writeLockInfo(info *state.LockInfo) error { + meta, err := c.blobClient.GetBlobMetadata(c.containerName, c.keyName) + if err != nil { + return err + } + + if info == nil { + delete(meta, lockInfoMetaKey) + } else { + value := base64.StdEncoding.EncodeToString(info.Marshal()) + meta[lockInfoMetaKey] = value + } + + headers := map[string]string{ + leaseHeader: c.leaseID, + } + return c.blobClient.SetBlobMetadata(c.containerName, c.keyName, meta, headers) + +} + +func (c *RemoteClient) Unlock(id string) error { + lockErr := &state.LockError{} + + lockInfo, err := c.getLockInfo() + if err != nil { + lockErr.Err = fmt.Errorf("failed to retrieve lock info: %s", err) + return lockErr + } + lockErr.Info = lockInfo + + if lockInfo.ID != id { + lockErr.Err = fmt.Errorf("lock id %q does not match existing lock", id) + return lockErr + } + + if err := c.writeLockInfo(nil); err != nil { + lockErr.Err = fmt.Errorf("failed to delete lock info from metadata: %s", err) + return lockErr + } + + err = c.blobClient.ReleaseLease(c.containerName, c.keyName, id) + if err != nil { + lockErr.Err = err + return lockErr + } + + c.leaseID = "" + + return nil +} diff --git a/backend/remote-state/azure/client_test.go b/backend/remote-state/azure/client_test.go new file mode 100644 index 000000000..7abef94b8 --- /dev/null +++ b/backend/remote-state/azure/client_test.go @@ -0,0 +1,69 @@ +package azure + +import ( + "testing" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/state/remote" +) + +func TestRemoteClient_impl(t *testing.T) { + var _ remote.Client = new(RemoteClient) + var _ remote.ClientLocker = new(RemoteClient) +} + +func TestRemoteClient(t *testing.T) { + testACC(t) + + keyName := "testState" + res := setupResources(t, keyName) + defer destroyResources(t, res.resourceGroupName) + + b := backend.TestBackendConfig(t, New(), map[string]interface{}{ + "storage_account_name": res.storageAccountName, + "container_name": res.containerName, + "key": keyName, + "access_key": res.accessKey, + }).(*Backend) + + state, err := b.State(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + remote.TestClient(t, state.(*remote.State).Client) +} + +func TestRemoteClientLocks(t *testing.T) { + testACC(t) + + keyName := "testState" + res := setupResources(t, keyName) + defer destroyResources(t, res.resourceGroupName) + + b1 := backend.TestBackendConfig(t, New(), map[string]interface{}{ + "storage_account_name": res.storageAccountName, + "container_name": res.containerName, + "key": keyName, + "access_key": res.accessKey, + }).(*Backend) + + b2 := backend.TestBackendConfig(t, New(), 
map[string]interface{}{
+		"storage_account_name": res.storageAccountName,
+		"container_name":       res.containerName,
+		"key":                  keyName,
+		"access_key":           res.accessKey,
+	}).(*Backend)
+
+	s1, err := b1.State(backend.DefaultStateName)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	s2, err := b2.State(backend.DefaultStateName)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	remote.TestRemoteLocks(t, s1.(*remote.State).Client, s2.(*remote.State).Client)
+}
diff --git a/state/remote/remote.go b/state/remote/remote.go
index 03506ae1b..132b77f70 100644
--- a/state/remote/remote.go
+++ b/state/remote/remote.go
@@ -46,7 +46,6 @@ func NewClient(t string, conf map[string]string) (Client, error) {
 // NewClient.
 var BuiltinClients = map[string]Factory{
 	"artifactory": artifactoryFactory,
-	"azure":       azureFactory,
 	"etcd":        etcdFactory,
 	"gcs":         gcsFactory,
 	"http":        httpFactory,
diff --git a/website/docs/backends/types/azure.html.md b/website/docs/backends/types/azure.html.md
index 043bcb154..6095b54a9 100644
--- a/website/docs/backends/types/azure.html.md
+++ b/website/docs/backends/types/azure.html.md
@@ -8,7 +8,7 @@ description: |-
 # azure
 
-**Kind: Standard (with no locking)**
+**Kind: Standard (with state locking)**
 
 Stores the state as a given key in a given bucket on
 [Microsoft Azure Storage](https://azure.microsoft.com/en-us/documentation/articles/storage-introduction/).
 
@@ -47,11 +47,17 @@ The following configuration options are supported:
  * `storage_account_name` - (Required) The name of the storage account
  * `container_name` - (Required) The name of the container to use within the storage account
  * `key` - (Required) The key where to place/look for the state file inside the container
- * `access_key` / `ARM_ACCESS_KEY` - (Required) Storage account access key
- * `lease_id` / `ARM_LEASE_ID` - (Optional) If set, will be used when writing to storage blob.
- * `resource_group_name` - (Optional) The name of the resource group for the storage account. Required if `access_key` isn't specified.
+ * `access_key` / `ARM_ACCESS_KEY` - (Optional) Storage account access key
  * `environment` / `ARM_ENVIRONMENT` - (Optional) The cloud environment to use. Supported values are:
    * `public` (default)
    * `usgovernment`
    * `german`
    * `china`
+
+The following configuration options must be supplied if `access_key` is not set:
+
+ * `resource_group_name` - The resource group which contains the storage account.
+ * `arm_subscription_id` / `ARM_SUBSCRIPTION_ID` - The Azure Subscription ID.
+ * `arm_client_id` / `ARM_CLIENT_ID` - The Azure Client ID.
+ * `arm_client_secret` / `ARM_CLIENT_SECRET` - The Azure Client Secret.
+ * `arm_tenant_id` / `ARM_TENANT_ID` - The Azure Tenant ID.
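
As a usage illustration (not part of the patch), a backend block wired up with the options documented above might look like the sketch below. The argument names are taken from the schema in backend.go; the account, container, and credential values are placeholders.

```hcl
terraform {
  backend "azure" {
    storage_account_name = "examplestateaccount"    # placeholder
    container_name       = "tfstate"                # placeholder
    key                  = "prod.terraform.tfstate" # placeholder

    # Either supply the storage access key directly (or via ARM_ACCESS_KEY)...
    access_key = "base64-encoded-storage-access-key"

    # ...or omit access_key and let the backend look it up with service
    # principal credentials (the corresponding ARM_* environment variables
    # can be used instead of these arguments).
    # resource_group_name = "example-resource-group"
    # arm_subscription_id = "00000000-0000-0000-0000-000000000000"
    # arm_client_id       = "00000000-0000-0000-0000-000000000000"
    # arm_client_secret   = "example-client-secret"
    # arm_tenant_id       = "00000000-0000-0000-0000-000000000000"
  }
}
```

After `terraform init`, state reads and writes go to the configured blob; locking is handled through blob leases as implemented in client.go, and named environments are stored alongside the configured key using the `env:` prefix from backend_state.go.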