From beee89c521303b190818fdab235c9a8264305e2f Mon Sep 17 00:00:00 2001
From: Joe Topjian
Date: Sat, 13 Aug 2016 19:59:25 +0000
Subject: [PATCH] provider/openstack: Fix Volume Attachment Detection in Instances

This commit changes the `volume` block from being computed to
non-computed. The Terraform configuration is now the source of truth
for the volumes attached to an instance, so Terraform can correctly
detect when a user detaches a volume during an update.

One thing to be aware of: if a user attached a volume out of band to an
instance controlled by Terraform, that volume will be detached upon the
next apply. The best thing to do is to add a `volume` entry to the
instance's configuration for any volumes that were attached out of
band.

This commit also explicitly detaches volumes from an instance before
the instance is deleted. Most Block Storage volume drivers account for
this scenario internally, but a few don't; this change supports those
drivers.

In addition, when an instance's volumes are read, the volumes declared
in the Terraform configuration are the source of truth. Previously, a
call was made to OpenStack to provide the list of attached volumes.

It also adds a few new tests and fixes existing tests for various
volume attachment scenarios.
---
 .../resource_openstack_compute_instance_v2.go | 104 ++++-----
 ...urce_openstack_compute_instance_v2_test.go | 208 +++++++++++++++++-
 2 files changed, 238 insertions(+), 74 deletions(-)

diff --git a/builtin/providers/openstack/resource_openstack_compute_instance_v2.go b/builtin/providers/openstack/resource_openstack_compute_instance_v2.go
index 73ee7f211..1af7eb60d 100644
--- a/builtin/providers/openstack/resource_openstack_compute_instance_v2.go
+++ b/builtin/providers/openstack/resource_openstack_compute_instance_v2.go
@@ -24,7 +24,6 @@ import (
 	"github.com/rackspace/gophercloud/openstack/compute/v2/flavors"
 	"github.com/rackspace/gophercloud/openstack/compute/v2/images"
 	"github.com/rackspace/gophercloud/openstack/compute/v2/servers"
-	"github.com/rackspace/gophercloud/pagination"
 )
 
 func resourceComputeInstanceV2() *schema.Resource {
@@ -238,17 +237,16 @@ func resourceComputeInstanceV2() *schema.Resource {
 			"volume": &schema.Schema{
 				Type:     schema.TypeSet,
 				Optional: true,
-				Computed: true,
 				Elem: &schema.Resource{
 					Schema: map[string]*schema.Schema{
 						"id": &schema.Schema{
 							Type:     schema.TypeString,
+							Optional: true,
 							Computed: true,
 						},
 						"volume_id": &schema.Schema{
 							Type:     schema.TypeString,
-							Optional: true,
-							Computed: true,
+							Required: true,
 						},
 						"device": &schema.Schema{
 							Type:     schema.TypeString,
@@ -351,12 +349,6 @@ func resourceComputeInstanceV2Create(d *schema.ResourceData, meta interface{}) e
 		return err
 	}
 
-	// determine if volume configuration is correct
-	// this includes ensuring volume_ids are set
-	if err := checkVolumeConfig(d); err != nil {
-		return err
-	}
-
 	// determine if block_device configuration is correct
 	// this includes valid combinations and required attributes
 	if err := checkBlockDeviceConfig(d); err != nil {
 		return err
 	}
@@ -712,16 +704,12 @@ func resourceComputeInstanceV2Update(d *schema.ResourceData, meta interface{}) e
 	}
 
 	if d.HasChange("volume") {
-		// ensure the volume configuration is correct
-		if err := checkVolumeConfig(d); err != nil {
-			return err
-		}
-
 		// old attachments and new attachments
 		oldAttachments, newAttachments := d.GetChange("volume")
 
 		// for each old attachment, detach the volume
 		oldAttachmentSet := oldAttachments.(*schema.Set).List()
+		log.Printf("[DEBUG] Attempting to detach the following volumes: %#v", oldAttachmentSet)
 		if blockClient, err := config.blockStorageV1Client(d.Get("region").(string)); err != nil {
 			return err
 		} else {
@@ -814,6 +802,22 @@ func resourceComputeInstanceV2Delete(d *schema.ResourceData, meta interface{}) e
 		return fmt.Errorf("Error creating OpenStack compute client: %s", err)
 	}
 
+	// Make sure all volumes are detached before deleting
+	volumes := d.Get("volume")
+	if volumeSet, ok := volumes.(*schema.Set); ok {
+		volumeList := volumeSet.List()
+		if len(volumeList) > 0 {
+			log.Printf("[DEBUG] Attempting to detach the following volumes: %#v", volumeList)
+			if blockClient, err := config.blockStorageV1Client(d.Get("region").(string)); err != nil {
+				return err
+			} else {
+				if err := detachVolumesFromInstance(computeClient, blockClient, d.Id(), volumeList); err != nil {
+					return err
+				}
+			}
+		}
+	}
+
 	if d.Get("stop_before_destroy").(bool) {
 		err = startstop.Stop(computeClient, d.Id()).ExtractErr()
 		if err != nil {
@@ -1397,6 +1401,7 @@ func detachVolumesFromInstance(computeClient *gophercloud.ServiceClient, blockCl
 		va := v.(map[string]interface{})
 		aId := va["id"].(string)
 
+		log.Printf("[INFO] Attempting to detach volume %s", va["volume_id"])
 		if err := volumeattach.Delete(computeClient, serverId, aId).ExtractErr(); err != nil {
 			return err
 		}
@@ -1420,65 +1425,40 @@ func detachVolumesFromInstance(computeClient *gophercloud.ServiceClient, blockCl
 }
 
 func getVolumeAttachments(computeClient *gophercloud.ServiceClient, d *schema.ResourceData) error {
-	var attachments []volumeattach.VolumeAttachment
-
-	err := volumeattach.List(computeClient, d.Id()).EachPage(func(page pagination.Page) (bool, error) {
-		actual, err := volumeattach.ExtractVolumeAttachments(page)
-		if err != nil {
-			return false, err
-		}
-
-		attachments = actual
-		return true, nil
-	})
+	var vols []map[string]interface{}
+	allPages, err := volumeattach.List(computeClient, d.Id()).AllPages()
 	if err != nil {
 		return err
 	}
 
-	var vols []map[string]interface{}
-	for _, attachment := range attachments {
-		// ignore the volume if it is attached as a root device
-		rootDevFound := false
-		for _, rootDev := range []string{"/dev/vda", "/dev/xda", "/dev/sda", "/dev/xvda"} {
-			if attachment.Device == rootDev {
-				rootDevFound = true
-			}
-		}
-
-		if rootDevFound {
-			continue
-		}
-
-		vol := make(map[string]interface{})
-		vol["id"] = attachment.ID
-		vol["volume_id"] = attachment.VolumeID
-		vol["device"] = attachment.Device
-		vols = append(vols, vol)
+	allVolumeAttachments, err := volumeattach.ExtractVolumeAttachments(allPages)
+	if err != nil {
+		return err
 	}
 
-	log.Printf("[INFO] Volume attachments: %v", vols)
-	d.Set("volume", vols)
-	return nil
-}
-
-func checkVolumeConfig(d *schema.ResourceData) error {
-	// Although a volume_id is required to attach a volume, in order to be able to report
-	// the attached volumes of an instance, it must be "computed" and thus "optional".
-	// This accounts for situations such as "boot from volume" as well as volumes being
-	// attached to the instance outside of Terraform.
- if v := d.Get("volume"); v != nil { - vols := v.(*schema.Set).List() - if len(vols) > 0 { - for _, v := range vols { - va := v.(map[string]interface{}) - if va["volume_id"].(string) == "" { - return fmt.Errorf("A volume_id must be specified when attaching volumes.") + if v, ok := d.GetOk("volume"); ok { + volumes := v.(*schema.Set).List() + for _, volume := range volumes { + if volumeMap, ok := volume.(map[string]interface{}); ok { + if v, ok := volumeMap["volume_id"].(string); ok { + for _, volumeAttachment := range allVolumeAttachments { + if v == volumeAttachment.ID { + vol := make(map[string]interface{}) + vol["id"] = volumeAttachment.ID + vol["volume_id"] = volumeAttachment.VolumeID + vol["device"] = volumeAttachment.Device + vols = append(vols, vol) + } + } } } } } + log.Printf("[INFO] Volume attachments: %v", vols) + d.Set("volume", vols) + return nil } diff --git a/builtin/providers/openstack/resource_openstack_compute_instance_v2_test.go b/builtin/providers/openstack/resource_openstack_compute_instance_v2_test.go index db54e21c4..7a67a69be 100644 --- a/builtin/providers/openstack/resource_openstack_compute_instance_v2_test.go +++ b/builtin/providers/openstack/resource_openstack_compute_instance_v2_test.go @@ -8,6 +8,7 @@ import ( "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" + "github.com/rackspace/gophercloud" "github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes" "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip" "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/secgroups" @@ -148,6 +149,11 @@ func TestAccComputeV2Instance_volumeDetachPostCreation(t *testing.T) { }`) var testAccComputeV2Instance_volumeDetachPostCreationInstance = fmt.Sprintf(` + resource "openstack_blockstorage_volume_v1" "myvol" { + name = "myvol" + size = 1 + } + resource "openstack_compute_instance_v2" "foo" { name = "terraform-test" security_groups = ["default"] @@ -169,7 +175,7 @@ func TestAccComputeV2Instance_volumeDetachPostCreation(t *testing.T) { resource.TestStep{ Config: testAccComputeV2Instance_volumeDetachPostCreationInstance, Check: resource.ComposeTestCheckFunc( - testAccCheckBlockStorageV1VolumeDoesNotExist(t, "openstack_blockstorage_volume_v1.myvol", &volume), + testAccCheckBlockStorageV1VolumeExists(t, "openstack_blockstorage_volume_v1.myvol", &volume), testAccCheckComputeV2InstanceExists(t, "openstack_compute_instance_v2.foo", &instance), testAccCheckComputeV2InstanceVolumesDetached(&instance), ), @@ -178,11 +184,12 @@ func TestAccComputeV2Instance_volumeDetachPostCreation(t *testing.T) { }) } -func TestAccComputeV2Instance_additionalVolumeDetachPostCreation(t *testing.T) { +func TestAccComputeV2Instance_volumeDetachAdditionalVolumePostCreation(t *testing.T) { var instance servers.Server - var volume volumes.Volume + var volume_1 volumes.Volume + var volume_2 volumes.Volume - var testAccComputeV2Instance_volumeDetachPostCreationInstanceAndAdditionalVolume = fmt.Sprintf(` + var testAccComputeV2Instance_volumeDetachAdditionalVolumePostCreationInstanceAndVolume = fmt.Sprintf(` resource "openstack_blockstorage_volume_v1" "root_volume" { name = "root_volume" @@ -246,22 +253,22 @@ func TestAccComputeV2Instance_additionalVolumeDetachPostCreation(t *testing.T) { CheckDestroy: testAccCheckComputeV2InstanceDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeV2Instance_volumeDetachPostCreationInstanceAndAdditionalVolume, + Config: 
testAccComputeV2Instance_volumeDetachAdditionalVolumePostCreationInstanceAndVolume, Check: resource.ComposeTestCheckFunc( - testAccCheckBlockStorageV1VolumeExists(t, "openstack_blockstorage_volume_v1.root_volume", &volume), - testAccCheckBlockStorageV1VolumeExists(t, "openstack_blockstorage_volume_v1.additional_volume", &volume), + testAccCheckBlockStorageV1VolumeExists(t, "openstack_blockstorage_volume_v1.root_volume", &volume_1), + testAccCheckBlockStorageV1VolumeExists(t, "openstack_blockstorage_volume_v1.additional_volume", &volume_2), testAccCheckComputeV2InstanceExists(t, "openstack_compute_instance_v2.foo", &instance), - testAccCheckComputeV2InstanceVolumeAttachment(&instance, &volume), - testAccCheckComputeV2InstanceVolumeAttachment(&instance, &volume), + testAccCheckComputeV2InstanceVolumeAttachment(&instance, &volume_1), + testAccCheckComputeV2InstanceVolumeAttachment(&instance, &volume_2), ), }, resource.TestStep{ Config: testAccComputeV2Instance_volumeDetachPostCreationInstance, Check: resource.ComposeTestCheckFunc( - testAccCheckBlockStorageV1VolumeExists(t, "openstack_blockstorage_volume_v1.root_volume", &volume), - testAccCheckBlockStorageV1VolumeExists(t, "openstack_blockstorage_volume_v1.additional_volume", &volume), + testAccCheckBlockStorageV1VolumeExists(t, "openstack_blockstorage_volume_v1.root_volume", &volume_1), + testAccCheckBlockStorageV1VolumeExists(t, "openstack_blockstorage_volume_v1.additional_volume", &volume_2), testAccCheckComputeV2InstanceExists(t, "openstack_compute_instance_v2.foo", &instance), - testAccCheckComputeV2InstanceVolumeAttachment(&instance, &volume), + testAccCheckComputeV2InstanceVolumeAttachment(&instance, &volume_1), testAccCheckComputeV2InstanceVolumeDetached(&instance, "openstack_blockstorage_volume_v1.additional_volume"), ), }, @@ -269,6 +276,159 @@ func TestAccComputeV2Instance_additionalVolumeDetachPostCreation(t *testing.T) { }) } +func TestAccComputeV2Instance_volumeAttachInstanceDelete(t *testing.T) { + var instance servers.Server + var volume_1 volumes.Volume + var volume_2 volumes.Volume + + var testAccComputeV2Instance_volumeAttachInstanceDelete_1 = fmt.Sprintf(` + resource "openstack_blockstorage_volume_v1" "root_volume" { + name = "root_volume" + size = 1 + image_id = "%s" + } + + resource "openstack_blockstorage_volume_v1" "additional_volume" { + name = "additional_volume" + size = 1 + } + + resource "openstack_compute_instance_v2" "foo" { + name = "terraform-test" + security_groups = ["default"] + + block_device { + uuid = "${openstack_blockstorage_volume_v1.root_volume.id}" + source_type = "volume" + boot_index = 0 + destination_type = "volume" + delete_on_termination = false + } + + volume { + volume_id = "${openstack_blockstorage_volume_v1.additional_volume.id}" + } + }`, + os.Getenv("OS_IMAGE_ID")) + + var testAccComputeV2Instance_volumeAttachInstanceDelete_2 = fmt.Sprintf(` + resource "openstack_blockstorage_volume_v1" "root_volume" { + name = "root_volume" + size = 1 + image_id = "%s" + } + + resource "openstack_blockstorage_volume_v1" "additional_volume" { + name = "additional_volume" + size = 1 + }`, + os.Getenv("OS_IMAGE_ID")) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeV2InstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeV2Instance_volumeAttachInstanceDelete_1, + Check: resource.ComposeTestCheckFunc( + testAccCheckBlockStorageV1VolumeExists(t, 
"openstack_blockstorage_volume_v1.root_volume", &volume_1), + testAccCheckBlockStorageV1VolumeExists(t, "openstack_blockstorage_volume_v1.additional_volume", &volume_2), + testAccCheckComputeV2InstanceExists(t, "openstack_compute_instance_v2.foo", &instance), + testAccCheckComputeV2InstanceVolumeAttachment(&instance, &volume_1), + testAccCheckComputeV2InstanceVolumeAttachment(&instance, &volume_2), + ), + }, + resource.TestStep{ + Config: testAccComputeV2Instance_volumeAttachInstanceDelete_2, + Check: resource.ComposeTestCheckFunc( + testAccCheckBlockStorageV1VolumeExists(t, "openstack_blockstorage_volume_v1.root_volume", &volume_1), + testAccCheckBlockStorageV1VolumeExists(t, "openstack_blockstorage_volume_v1.additional_volume", &volume_2), + testAccCheckComputeV2InstanceDoesNotExist(t, "openstack_compute_instance_v2.foo", &instance), + testAccCheckComputeV2InstanceVolumeDetached(&instance, "openstack_blockstorage_volume_v1.root_volume"), + testAccCheckComputeV2InstanceVolumeDetached(&instance, "openstack_blockstorage_volume_v1.additional_volume"), + ), + }, + }, + }) +} + +func TestAccComputeV2Instance_volumeAttachToNewInstance(t *testing.T) { + var instance_1 servers.Server + var instance_2 servers.Server + var volume_1 volumes.Volume + + var testAccComputeV2Instance_volumeAttachToNewInstance_1 = fmt.Sprintf(` + resource "openstack_blockstorage_volume_v1" "volume_1" { + name = "volume_1" + size = 1 + } + + resource "openstack_compute_instance_v2" "instance_1" { + name = "instance_1" + security_groups = ["default"] + + volume { + volume_id = "${openstack_blockstorage_volume_v1.volume_1.id}" + } + } + + resource "openstack_compute_instance_v2" "instance_2" { + depends_on = ["openstack_compute_instance_v2.instance_1"] + name = "instance_2" + security_groups = ["default"] + }`) + + var testAccComputeV2Instance_volumeAttachToNewInstance_2 = fmt.Sprintf(` + resource "openstack_blockstorage_volume_v1" "volume_1" { + name = "volume_1" + size = 1 + } + + resource "openstack_compute_instance_v2" "instance_1" { + name = "instance_1" + security_groups = ["default"] + } + + resource "openstack_compute_instance_v2" "instance_2" { + depends_on = ["openstack_compute_instance_v2.instance_1"] + name = "instance_2" + security_groups = ["default"] + + volume { + volume_id = "${openstack_blockstorage_volume_v1.volume_1.id}" + } + }`) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeV2InstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeV2Instance_volumeAttachToNewInstance_1, + Check: resource.ComposeTestCheckFunc( + testAccCheckBlockStorageV1VolumeExists(t, "openstack_blockstorage_volume_v1.volume_1", &volume_1), + testAccCheckComputeV2InstanceExists(t, "openstack_compute_instance_v2.instance_1", &instance_1), + testAccCheckComputeV2InstanceExists(t, "openstack_compute_instance_v2.instance_2", &instance_2), + testAccCheckComputeV2InstanceVolumeAttachment(&instance_1, &volume_1), + testAccCheckComputeV2InstanceVolumeDetached(&instance_2, "openstack_blockstorage_volume_v1.volume_1"), + ), + }, + resource.TestStep{ + Config: testAccComputeV2Instance_volumeAttachToNewInstance_2, + Check: resource.ComposeTestCheckFunc( + testAccCheckBlockStorageV1VolumeExists(t, "openstack_blockstorage_volume_v1.volume_1", &volume_1), + testAccCheckComputeV2InstanceExists(t, "openstack_compute_instance_v2.instance_1", &instance_1), + testAccCheckComputeV2InstanceExists(t, 
"openstack_compute_instance_v2.instance_2", &instance_2), + testAccCheckComputeV2InstanceVolumeDetached(&instance_1, "openstack_blockstorage_volume_v1.volume_1"), + testAccCheckComputeV2InstanceVolumeAttachment(&instance_2, &volume_1), + ), + }, + }, + }) +} + func TestAccComputeV2Instance_floatingIPAttachGlobally(t *testing.T) { var instance servers.Server var fip floatingip.FloatingIP @@ -928,6 +1088,30 @@ func testAccCheckComputeV2InstanceExists(t *testing.T, n string, instance *serve } } +func testAccCheckComputeV2InstanceDoesNotExist(t *testing.T, n string, instance *servers.Server) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + computeClient, err := config.computeV2Client(OS_REGION_NAME) + if err != nil { + return fmt.Errorf("(testAccCheckComputeV2InstanceExists) Error creating OpenStack compute client: %s", err) + } + + _, err = servers.Get(computeClient, instance.ID).Extract() + if err != nil { + errCode, ok := err.(*gophercloud.UnexpectedResponseCodeError) + if !ok { + return err + } + if errCode.Actual == 404 { + return nil + } + return err + } + + return fmt.Errorf("Instance still exists") + } +} + func testAccCheckComputeV2InstanceMetadata( instance *servers.Server, k string, v string) resource.TestCheckFunc { return func(s *terraform.State) error {